HDFS-13958. Miscellaneous Improvements for FsVolumeSpi. Contributed by BELUGA BEHR.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73c660b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73c660b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73c660b4

Branch: refs/heads/HEAD
Commit: 73c660b43f3d5311947d2acc9488f17c1caf4de0
Parents: f13e231
Author: Inigo Goiri <inigo...@apache.org>
Authored: Fri Oct 5 09:32:19 2018 -0700
Committer: Inigo Goiri <inigo...@apache.org>
Committed: Fri Oct 5 09:32:19 2018 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/DirectoryScanner.java  |  18 +-
 .../server/datanode/fsdataset/FsVolumeSpi.java  |  36 ++-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 203 +++++++++--------
 .../fsdataset/impl/ProvidedVolumeImpl.java      |  50 ++---
 .../server/datanode/SimulatedFSDataset.java     | 222 +++++++++----------
 .../server/datanode/TestDirectoryScanner.java   |   6 +-
 .../datanode/extdataset/ExternalVolumeImpl.java |  10 +-
 7 files changed, 251 insertions(+), 294 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 484899d..7ae9e45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import java.io.File;
-import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -26,7 +24,6 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -46,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi.ScanInfo;
@@ -657,7 +653,7 @@ public class DirectoryScanner implements Runnable {
       perfTimer.start();
       throttleTimer.start();
       for (String bpid : bpList) {
-        LinkedList<ScanInfo> report = new LinkedList<>();
+        List<ScanInfo> report = new ArrayList<>(DEFAULT_MAP_SIZE);
 
         perfTimer.reset().start();
         throttleTimer.reset().start();
@@ -720,16 +716,4 @@ public class DirectoryScanner implements Runnable {
       perfTimer.reset().start();
     }
   }
-
-  public enum BlockDirFilter implements FilenameFilter {
-    INSTANCE;
-
-    @Override
-    public boolean accept(File dir, String name) {
-      return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
-          || name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
-          || name.startsWith(Block.BLOCK_FILE_PREFIX);
-    }
-  }
-
 }
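
For context on the report-list change above: a minimal, self-contained sketch (not part of the patch) of why an ArrayList sized up front suits this append-then-iterate workload better than a LinkedList. The constant's value below is illustrative, standing in for DirectoryScanner's DEFAULT_MAP_SIZE.

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

public class ListChoiceSketch {
  // Illustrative stand-in for DirectoryScanner's DEFAULT_MAP_SIZE constant.
  private static final int DEFAULT_MAP_SIZE = 32768;

  public static void main(String[] args) {
    // Old style: one node allocation per element, extra pointer chasing.
    List<Long> linked = new LinkedList<>();
    // New style: a single pre-sized backing array, no resizing on append.
    List<Long> sized = new ArrayList<>(DEFAULT_MAP_SIZE);
    for (long i = 0; i < DEFAULT_MAP_SIZE; i++) {
      linked.add(i);
      sized.add(i);
    }
    System.out.println(linked.size() + " == " + sized.size());
  }
}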

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
index 20a153d..7329ba3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsVolumeSpi.java
@@ -22,7 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.nio.channels.ClosedChannelException;
-import java.util.LinkedList;
+import java.util.Collection;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -32,9 +32,9 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
 import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -362,13 +362,13 @@ public interface FsVolumeSpi
     public File getMetaFile() {
       if (metaSuffix == null) {
         return null;
-      } else if (blockSuffix == null) {
-        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
-            metaSuffix);
-      } else {
-        return new File(new File(volume.getBaseURI()).getAbsolutePath(),
-            blockSuffix + metaSuffix);
       }
+      String fileSuffix = metaSuffix;
+      if (blockSuffix != null) {
+        fileSuffix = blockSuffix + metaSuffix;
+      }
+      return new File(new File(volume.getBaseURI()).getAbsolutePath(),
+          fileSuffix);
     }
 
     /**
@@ -389,18 +389,12 @@ public interface FsVolumeSpi
       return volume;
     }
 
-    @Override // Comparable
+    @Override
     public int compareTo(ScanInfo b) {
-      if (blockId < b.blockId) {
-        return -1;
-      } else if (blockId == b.blockId) {
-        return 0;
-      } else {
-        return 1;
-      }
+      return Long.compare(this.blockId, b.blockId);
     }
 
-    @Override // Object
+    @Override
     public boolean equals(Object o) {
       if (this == o) {
         return true;
@@ -411,9 +405,9 @@ public interface FsVolumeSpi
       return blockId == ((ScanInfo) o).blockId;
     }
 
-    @Override // Object
+    @Override
     public int hashCode() {
-      return (int)(blockId^(blockId>>>32));
+      return Long.hashCode(this.blockId);
     }
 
     public long getGenStamp() {
@@ -447,8 +441,8 @@ public interface FsVolumeSpi
    * @param reportCompiler
    * @throws IOException
    */
-  LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+  void compileReport(String bpid,
+      Collection<ScanInfo> report, ReportCompiler reportCompiler)
       throws InterruptedException, IOException;
 
   /**
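
The ScanInfo hunks above replace hand-rolled comparison and hashing with JDK helpers. A minimal sketch of the same idioms on an assumed long-keyed class (BlockKey is hypothetical, not the real ScanInfo): Long.compare avoids the explicit three-way branching, and Long.hashCode(v) computes exactly the value of the old manual (int) (v ^ (v >>> 32)) expression.

final class BlockKey implements Comparable<BlockKey> {
  private final long blockId;

  BlockKey(long blockId) {
    this.blockId = blockId;
  }

  @Override
  public int compareTo(BlockKey other) {
    // Branch-free three-way comparison of two longs.
    return Long.compare(this.blockId, other.blockId);
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BlockKey)) {
      return false;
    }
    return blockId == ((BlockKey) o).blockId;
  }

  @Override
  public int hashCode() {
    // Same value the old xor-shift produced, with the intent made obvious.
    return Long.hashCode(blockId);
  }
}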

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 9969976..517bbc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -28,8 +28,8 @@ import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
+import java.util.Collection;
 import java.util.Collections;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -46,38 +46,37 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
-import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.util.DataChecksum;
-import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.BlockDirFilter;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTracker.RamDiskReplica;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
+import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;
@@ -94,7 +93,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * The underlying volume used to store replica.
- * 
+ *
  * It uses the {@link FsDatasetImpl} object for synchronization.
  */
 @InterfaceAudience.Private
@@ -311,11 +310,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   boolean checkClosed() {
     if (this.reference.getReferenceCount() > 0) {
-      if (FsDatasetImpl.LOG.isDebugEnabled()) {
-        FsDatasetImpl.LOG.debug(String.format(
-            "The reference count for %s is %d, wait to be 0.",
-            this, reference.getReferenceCount()));
-      }
+      FsDatasetImpl.LOG.debug("The reference count for {} is {}, wait to be 0.",
+          this, reference.getReferenceCount());
       return false;
     }
     return true;
@@ -325,7 +321,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   File getCurrentDir() {
     return currentDir;
   }
-  
+
   protected File getRbwDir(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getRbwDir();
   }
@@ -381,7 +377,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    for(BlockPoolSlice s : bpSlices.values()) {
+    for (BlockPoolSlice s : bpSlices.values()) {
       dfsUsed += s.getDfsUsed();
     }
     return dfsUsed;
@@ -390,21 +386,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
   long getBlockPoolUsed(String bpid) throws IOException {
     return getBlockPoolSlice(bpid).getDfsUsed();
   }
-  
+
   /**
    * Return either the configured capacity of the file system if configured; or
    * the capacity of the file system excluding space reserved for non-HDFS.
-   * 
+   *
    * @return the unreserved number of bytes left in this filesystem. May be
    *         zero.
    */
   @VisibleForTesting
   public long getCapacity() {
-    if (configuredCapacity < 0) {
+    if (configuredCapacity < 0L) {
       long remaining = usage.getCapacity() - getReserved();
-      return remaining > 0 ? remaining : 0;
+      return Math.max(remaining, 0L);
     }
-
     return configuredCapacity;
   }
 
@@ -418,10 +413,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
     this.configuredCapacity = capacity;
   }
 
-  /*
+  /**
    * Calculate the available space of the filesystem, excluding space reserved
-   * for non-HDFS and space reserved for RBW
-   * 
+   * for non-HDFS and space reserved for RBW.
+   *
    * @return the available number of bytes left in this filesystem. May be zero.
    */
   @Override
@@ -432,7 +427,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     if (remaining > available) {
       remaining = available;
     }
-    return (remaining > 0) ? remaining : 0;
+    return Math.max(remaining, 0L);
   }
 
   long getActualNonDfsUsed() throws IOException {
@@ -458,10 +453,8 @@ public class FsVolumeImpl implements FsVolumeSpi {
   public long getNonDfsUsed() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
     long actualReserved = getReserved();
-    if (actualNonDfsUsed < actualReserved) {
-      return 0L;
-    }
-    return actualNonDfsUsed - actualReserved;
+    long nonDfsUsed = actualNonDfsUsed - actualReserved;
+    return Math.max(nonDfsUsed, 0L);
   }
 
   @VisibleForTesting
@@ -503,7 +496,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       try {
         return new DF(new File(currentDir.getParent()), conf);
       } catch (IOException e) {
-        LOG.error("Unable to get disk statistics for volume " + this);
+        LOG.error("Unable to get disk statistics for volume {}", this, e);
       }
     }
     return null;
@@ -525,11 +518,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   /**
-   * Make a deep copy of the list of currently active BPIDs
+   * Make a deep copy of the list of currently active BPIDs.
    */
   @Override
   public String[] getBlockPoolList() {
-    return bpSlices.keySet().toArray(new String[bpSlices.keySet().size()]);   
+    return bpSlices.keySet().toArray(new String[0]);
   }
 
   /**
@@ -549,7 +542,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   @Override
   public void reserveSpaceForReplica(long bytesToReserve) {
-    if (bytesToReserve != 0) {
+    if (bytesToReserve != 0L) {
       reservedForReplicas.addAndGet(bytesToReserve);
       recentReserved = bytesToReserve;
     }
@@ -557,17 +550,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   @Override
   public void releaseReservedSpace(long bytesToRelease) {
-    if (bytesToRelease != 0) {
-
+    if (bytesToRelease != 0L) {
       long oldReservation, newReservation;
       do {
         oldReservation = reservedForReplicas.get();
         newReservation = oldReservation - bytesToRelease;
-        if (newReservation < 0) {
-          // Failsafe, this should never occur in practice, but if it does we
-          // don't want to start advertising more space than we have available.
-          newReservation = 0;
-        }
+
+        // Fail-safe, this should never go below zero in practice, but if it
+        // does, do not advertise more space than is actually available.
+        newReservation = Math.max(newReservation, 0L);
       } while (!reservedForReplicas.compareAndSet(oldReservation,
           newReservation));
     }
@@ -679,20 +670,15 @@ public class FsVolumeImpl implements FsVolumeSpi {
           FsVolumeImpl.this, dir, SubdirFilter.INSTANCE);
       cache = null;
       cacheMs = 0;
-      if (children.size() == 0) {
+      if (children.isEmpty()) {
         LOG.trace("getNextSubDir({}, {}): no subdirectories found in {}",
             storageID, bpid, dir.getAbsolutePath());
         return null;
       }
       Collections.sort(children);
       String nextSubDir = nextSorted(children, prev);
-      if (nextSubDir == null) {
-        LOG.trace("getNextSubDir({}, {}): no more subdirectories found in {}",
-            storageID, bpid, dir.getAbsolutePath());
-      } else {
-        LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} " +
-            "within {}", storageID, bpid, nextSubDir, dir.getAbsolutePath());
-      }
+      LOG.trace("getNextSubDir({}, {}): picking next subdirectory {} within 
{}",
+          storageID, bpid, nextSubDir, dir.getAbsolutePath());
       return nextSubDir;
     }
 
@@ -731,16 +717,13 @@ public class FsVolumeImpl implements FsVolumeSpi {
                     state.curFinalizedDir, state.curFinalizedSubDir).toFile();
       List<String> entries = fileIoProvider.listDirectory(
           FsVolumeImpl.this, dir, BlockFileFilter.INSTANCE);
-      if (entries.size() == 0) {
+      if (entries.isEmpty()) {
         entries = null;
+        LOG.trace("getSubdirEntries({}, {}): no entries found in {}", 
storageID,
+            bpid, dir.getAbsolutePath());
       } else {
         Collections.sort(entries);
-      }
-      if (entries == null) {
-        LOG.trace("getSubdirEntries({}, {}): no entries found in {}",
-            storageID, bpid, dir.getAbsolutePath());
-      } else {
-        LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}", 
+        LOG.trace("getSubdirEntries({}, {}): listed {} entries in {}",
             storageID, bpid, entries.size(), dir.getAbsolutePath());
       }
       cache = entries;
@@ -872,9 +855,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
     public void load() throws IOException {
       File file = getSaveFile();
       this.state = READER.readValue(file);
-      LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
-          bpid, name, file.getAbsoluteFile(),
-          WRITER.writeValueAsString(state));
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("load({}, {}): loaded iterator {} from {}: {}", storageID,
+            bpid, name, file.getAbsoluteFile(),
+            WRITER.writeValueAsString(state));
+      }
     }
 
     File getSaveFile() {
@@ -956,15 +941,21 @@ public class FsVolumeImpl implements FsVolumeSpi {
       long bytesReserved) throws IOException {
     releaseReservedSpace(bytesReserved);
     File dest = getBlockPoolSlice(bpid).addFinalizedBlock(b, replicaInfo);
-    byte[] checksum = null;
+    final byte[] checksum;
     // copy the last partial checksum if the replica is originally
     // in finalized or rbw state.
-    if (replicaInfo.getState() == ReplicaState.FINALIZED) {
-      FinalizedReplica finalized = (FinalizedReplica)replicaInfo;
+    switch (replicaInfo.getState()) {
+    case FINALIZED:
+      FinalizedReplica finalized = (FinalizedReplica) replicaInfo;
       checksum = finalized.getLastPartialChunkChecksum();
-    } else if (replicaInfo.getState() == ReplicaState.RBW) {
-      ReplicaBeingWritten rbw = (ReplicaBeingWritten)replicaInfo;
+      break;
+    case RBW:
+      ReplicaBeingWritten rbw = (ReplicaBeingWritten) replicaInfo;
       checksum = rbw.getLastChecksumAndDataLen().getChecksum();
+      break;
+    default:
+      checksum = null;
+      break;
     }
 
     return new ReplicaBuilder(ReplicaState.FINALIZED)
@@ -983,28 +974,26 @@ public class FsVolumeImpl implements FsVolumeSpi {
   public VolumeCheckResult check(VolumeCheckContext ignored)
       throws DiskErrorException {
     // TODO:FEDERATION valid synchronization
-    for(BlockPoolSlice s : bpSlices.values()) {
+    for (BlockPoolSlice s : bpSlices.values()) {
       s.checkDirs();
     }
     return VolumeCheckResult.HEALTHY;
   }
-    
+
   void getVolumeMap(ReplicaMap volumeMap,
-                    final RamDiskReplicaTracker ramDiskReplicaMap)
-      throws IOException {
-    for(BlockPoolSlice s : bpSlices.values()) {
+      final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
+    for (BlockPoolSlice s : bpSlices.values()) {
       s.getVolumeMap(volumeMap, ramDiskReplicaMap);
     }
   }
-  
+
   void getVolumeMap(String bpid, ReplicaMap volumeMap,
-                    final RamDiskReplicaTracker ramDiskReplicaMap)
-      throws IOException {
+      final RamDiskReplicaTracker ramDiskReplicaMap) throws IOException {
     getBlockPoolSlice(bpid).getVolumeMap(volumeMap, ramDiskReplicaMap);
   }
 
   long getNumBlocks() {
-    long numBlocks = 0;
+    long numBlocks = 0L;
     for (BlockPoolSlice s : bpSlices.values()) {
       numBlocks += s.getNumOfBlocks();
     }
@@ -1038,13 +1027,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
     File bpdir = new File(currentDir, bpid);
     BlockPoolSlice bp;
     if (timer == null) {
-      bp = new BlockPoolSlice(bpid, this, bpdir, c, new Timer());
-    } else {
-      bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
+      timer = new Timer();
     }
+    bp = new BlockPoolSlice(bpid, this, bpdir, c, timer);
     bpSlices.put(bpid, bp);
   }
-  
+
   void shutdownBlockPool(String bpid, BlockListAsLongs blocksListsAsLongs) {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
@@ -1070,7 +1058,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     }
     return true;
   }
-  
+
   void deleteBPDirectories(String bpid, boolean force) throws IOException {
     File volumeCurrentDir = this.getCurrentDir();
     File bpDir = new File(volumeCurrentDir, bpid);
@@ -1078,7 +1066,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       // nothing to be deleted
       return;
     }
-    File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP); 
+    File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
     File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
     File finalizedDir = new File(bpCurrentDir,
         DataStorage.STORAGE_DIR_FINALIZED);
@@ -1127,17 +1115,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
   public String getStorageID() {
     return storageID;
   }
-  
+
   @Override
   public StorageType getStorageType() {
     return storageType;
   }
-  
+
   DatanodeStorage toDatanodeStorage() {
     return new DatanodeStorage(storageID, DatanodeStorage.State.NORMAL, storageType);
   }
 
-
   @Override
   public byte[] loadLastPartialChunkChecksum(
       File blockFile, File metaFile) throws IOException {
@@ -1313,11 +1300,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    return compileReport(getFinalizedDir(bpid),
-        getFinalizedDir(bpid), report, reportCompiler);
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
+    compileReport(getFinalizedDir(bpid), getFinalizedDir(bpid), report,
+        reportCompiler);
   }
 
   @Override
@@ -1330,21 +1316,35 @@ public class FsVolumeImpl implements FsVolumeSpi {
     return metrics;
   }
 
-  private LinkedList<ScanInfo> compileReport(File bpFinalizedDir,
-      File dir, LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-        throws InterruptedException {
+  /**
+   * Filter for block file names stored on the file system volumes.
+   */
+  public enum BlockDirFilter implements FilenameFilter {
+    INSTANCE;
+
+    @Override
+    public boolean accept(File dir, String name) {
+      return name.startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)
+          || name.startsWith(DataStorage.STORAGE_DIR_FINALIZED)
+          || name.startsWith(Block.BLOCK_FILE_PREFIX);
+    }
+  }
+
+  private void compileReport(File bpFinalizedDir, File dir,
+      Collection<ScanInfo> report, ReportCompiler reportCompiler)
+      throws InterruptedException {
 
     reportCompiler.throttle();
 
     List <String> fileNames;
     try {
-      fileNames = fileIoProvider.listDirectory(
-          this, dir, BlockDirFilter.INSTANCE);
+      fileNames =
+          fileIoProvider.listDirectory(this, dir, BlockDirFilter.INSTANCE);
     } catch (IOException ioe) {
-      LOG.warn("Exception occurred while compiling report: ", ioe);
+      LOG.warn("Exception occurred while compiling report", ioe);
       // Volume error check moved to FileIoProvider.
       // Ignore this directory and proceed.
-      return report;
+      return;
     }
     Collections.sort(fileNames);
 
@@ -1396,7 +1396,6 @@ public class FsVolumeImpl implements FsVolumeSpi {
       verifyFileLocation(blockFile, bpFinalizedDir, blockId);
       report.add(new ScanInfo(blockId, blockFile, metaFile, this));
     }
-    return report;
   }
 
   /**
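
Two idioms recur in the FsVolumeImpl hunks above: clamping with Math.max(x, 0L) instead of branchy floors, and SLF4J parameterized logging in place of isDebugEnabled() guards around String.format. A compact sketch of both under assumed names (the reservedForReplicas field mirrors the patch; the class and messages are illustrative):

import java.util.concurrent.atomic.AtomicLong;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ReservationSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ReservationSketch.class);

  private final AtomicLong reservedForReplicas = new AtomicLong();

  void releaseReservedSpace(long bytesToRelease) {
    if (bytesToRelease != 0L) {
      long oldReservation;
      long newReservation;
      do {
        oldReservation = reservedForReplicas.get();
        // Fail-safe floor: a double release can never drive the advertised
        // reservation below zero.
        newReservation = Math.max(oldReservation - bytesToRelease, 0L);
      } while (!reservedForReplicas.compareAndSet(oldReservation,
          newReservation));
      // Cheap arguments need no guard: {} formatting is deferred until the
      // DEBUG level is actually enabled.
      LOG.debug("Reservation is {} after releasing {} bytes.",
          reservedForReplicas.get(), bytesToRelease);
    }
  }
}

Note the patch still keeps an explicit isTraceEnabled() guard in load(): a guard remains worthwhile when computing an argument (there, WRITER.writeValueAsString(state)) is itself expensive.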

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
index e2d8681..420a9a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java
@@ -17,15 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
+
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.Map;
-import java.util.Set;
 import java.util.Map.Entry;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -41,21 +43,23 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.FileRegion;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
+import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
-import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder;
-import org.apache.hadoop.util.Timer;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.AutoCloseableLock;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectReader;
@@ -63,11 +67,6 @@ import org.codehaus.jackson.map.ObjectWriter;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.util.Time;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PROVIDED_ALIASMAP_LOAD_RETRIES;
-
 /**
  * This class is used to create provided volumes.
  */
@@ -227,9 +226,9 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       // nothing to do!
     }
 
-    public void compileReport(LinkedList<ScanInfo> report,
+    public void compileReport(Collection<ScanInfo> report,
         ReportCompiler reportCompiler)
-            throws IOException, InterruptedException {
+        throws IOException, InterruptedException {
       /* refresh the aliasMap and return the list of blocks found.
        * the assumption here is that the block ids in the external
        * block map, after the refresh, are consistent with those
@@ -240,9 +239,8 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       BlockAliasMap.Reader<FileRegion> reader = aliasMap.getReader(null, bpid);
       for (FileRegion region : reader) {
         reportCompiler.throttle();
-        report.add(new ScanInfo(region.getBlock().getBlockId(),
-            providedVolume, region,
-            region.getProvidedStorageLocation().getLength()));
+        report.add(new ScanInfo(region.getBlock().getBlockId(), providedVolume,
+            region, region.getProvidedStorageLocation().getLength()));
       }
     }
 
@@ -336,7 +334,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
 
   @Override
   long getNumBlocks() {
-    long numBlocks = 0;
+    long numBlocks = 0L;
     for (ProvidedBlockPoolSlice s : bpSlices.values()) {
       numBlocks += s.getNumOfBlocks();
     }
@@ -381,7 +379,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       iterStartMs = Time.now();
       lastSavedMs = iterStartMs;
       atEnd = false;
-      lastBlockId = -1;
+      lastBlockId = -1L;
     }
 
     // The wall-clock ms since the epoch at which this iterator was last saved.
@@ -533,7 +531,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
       final RamDiskReplicaTracker ramDiskReplicaMap)
           throws IOException {
     LOG.info("Creating volumemap for provided volume " + this);
-    for(ProvidedBlockPoolSlice s : bpSlices.values()) {
+    for (ProvidedBlockPoolSlice s : bpSlices.values()) {
       s.fetchVolumeMap(volumeMap, ramDiskReplicaMap, remoteFS);
     }
   }
@@ -611,14 +609,12 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    LOG.info("Compiling report for volume: " + this + " bpid " + bpid);
-    if(bpSlices.containsKey(bpid)) {
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
+    LOG.info("Compiling report for volume: {}; bpid: {}", this, bpid);
+    if (bpSlices.containsKey(bpid)) {
       bpSlices.get(bpid).compileReport(report, reportCompiler);
     }
-    return report;
   }
 
   @Override
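
Across the volume implementations, compileReport now appends into a caller-supplied Collection<ScanInfo> and returns void, rather than taking and returning a LinkedList. A simplified sketch of the narrowed contract with stand-in types (Volume and the String payload are assumptions, not the real SPI):

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

interface Volume {
  // Implementations only append; they no longer dictate the list type.
  void compileReport(String bpid, Collection<String> report)
      throws InterruptedException;
}

class Caller {
  List<String> scan(Volume volume, String bpid) throws InterruptedException {
    // The caller now owns the choice of implementation and initial sizing.
    List<String> report = new ArrayList<>(1024);
    volume.compileReport(bpid, report);
    return report;
  }
}

Accepting the widest useful type and letting the caller choose the concrete list is what lets DirectoryScanner switch to a pre-sized ArrayList without touching any implementation.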

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index eb9461f..0dc2733 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -25,10 +25,11 @@ import java.net.URI;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
@@ -75,18 +76,18 @@ import org.apache.hadoop.util.DataChecksum;
 
 /**
  * This class implements a simulated FSDataset.
- * 
+ *
  * Blocks that are created are recorded but their data (plus their CRCs) are
  *  discarded.
  * Fixed data is returned when blocks are read; a null CRC meta file is
  * created for such data.
- * 
+ *
  * This FSDataset does not remember any block information across its
  * restarts; it does however offer an operation to inject blocks
  *  (See the TestInectionForSImulatedStorage()
  * for a usage example of injection.
- * 
- * Note the synchronization is coarse grained - it is at each method. 
+ *
+ * Note the synchronization is coarse grained - it is at each method.
  */
 public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public final static int BYTE_MASK = 0xff;
@@ -145,17 +146,17 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     byte firstByte = (byte) (b.getBlockId() & BYTE_MASK);
     return (byte) ((firstByte + offsetInBlk % 29) & BYTE_MASK);
   }
-  
+
   public static final String CONFIG_PROPERTY_CAPACITY =
       "dfs.datanode.simulateddatastorage.capacity";
-  
+
   public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte
-  
+
   public static final String CONFIG_PROPERTY_STATE =
       "dfs.datanode.simulateddatastorage.state";
   private static final DatanodeStorage.State DEFAULT_STATE =
       DatanodeStorage.State.NORMAL;
-  
+
   static final byte[] nullCrcFileData;
 
   private final AutoCloseableLock datasetLock;
@@ -183,8 +184,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     private boolean pinned = false;
     BInfo(String bpid, Block b, boolean forWriting) throws IOException {
       theBlock = new Block(b);
-      if (theBlock.getNumBytes() < 0) {
-        theBlock.setNumBytes(0);
+      if (theBlock.getNumBytes() < 0L) {
+        theBlock.setNumBytes(0L);
       }
       if (!getStorage(theBlock).alloc(bpid, theBlock.getNumBytes())) {
         // expected length - actual length may
@@ -201,7 +202,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         oStream = null;
       }
     }
-    
+
     @Override
     public String getStorageUuid() {
       return getStorage(theBlock).getStorageUuid();
@@ -229,7 +230,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         theBlock.setNumBytes(length);
       }
     }
-    
+
     synchronized SimulatedInputStream getIStream() {
       if (!finalized) {
         // throw new IOException("Trying to read an unfinalized block");
@@ -238,12 +239,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         return new SimulatedInputStream(theBlock.getNumBytes(), theBlock);
       }
     }
-    
+
     synchronized void finalizeBlock(String bpid, long finalSize)
         throws IOException {
       if (finalized) {
         throw new IOException(
-            "Finalizing a block that has already been finalized" + 
+            "Finalizing a block that has already been finalized" +
             theBlock.getBlockId());
       }
       if (oStream == null) {
@@ -257,10 +258,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         throw new IOException(
           "Size passed to finalize does not match the amount of data written");
       }
-      // We had allocated the expected length when block was created; 
+      // We had allocated the expected length when block was created;
       // adjust if necessary
       long extraLen = finalSize - theBlock.getNumBytes();
-      if (extraLen > 0) {
+      if (extraLen > 0L) {
         if (!getStorage(theBlock).alloc(bpid, extraLen)) {
           DataNode.LOG.warn("Lack of free storage on a block alloc");
           throw new IOException("Creating block, no free space available");
@@ -268,7 +269,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       } else {
         getStorage(theBlock).free(bpid, -extraLen);
       }
-      theBlock.setNumBytes(finalSize);  
+      theBlock.setNumBytes(finalSize);
 
       finalized = true;
       oStream = null;
@@ -289,7 +290,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     SimulatedInputStream getMetaIStream() {
-      return new SimulatedInputStream(nullCrcFileData);  
+      return new SimulatedInputStream(nullCrcFileData);
     }
 
     synchronized boolean isFinalized() {
@@ -297,7 +298,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
-    synchronized public ReplicaOutputStreams createStreams(boolean isCreate, 
+    synchronized public ReplicaOutputStreams createStreams(boolean isCreate,
         DataChecksum requestedChecksum)
         throws IOException {
       if (finalized) {
@@ -396,36 +397,37 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     public void stopWriter(long xceiverStopTimeout) throws IOException {
     }
   }
-  
+
   /**
    * Class is used for tracking block pool storage utilization similar
    * to {@link BlockPoolSlice}
    */
   private static class SimulatedBPStorage {
-    private long used;    // in bytes
+    // in bytes
+    private long used;
     private final Map<Block, BInfo> blockMap = new TreeMap<>();
-    
+
     long getUsed() {
       return used;
     }
-    
+
     void alloc(long amount) {
       used += amount;
     }
-    
+
     void free(long amount) {
       used -= amount;
     }
-    
+
     Map<Block, BInfo> getBlockMap() {
       return blockMap;
     }
 
     SimulatedBPStorage() {
-      used = 0;   
+      used = 0L;
     }
   }
-  
+
   /**
    * Class used for tracking datanode level storage utilization similar
    * to {@link FSVolumeSet}
@@ -437,27 +439,27 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     private final long capacity;  // in bytes
     private final DatanodeStorage dnStorage;
     private final SimulatedVolume volume;
-    
+
     synchronized long getFree() {
       return capacity - getUsed();
     }
-    
+
     long getCapacity() {
       return capacity;
     }
-    
+
     synchronized long getUsed() {
-      long used = 0;
+      long used = 0L;
       for (SimulatedBPStorage bpStorage : map.values()) {
         used += bpStorage.getUsed();
       }
       return used;
     }
-    
+
     synchronized long getBlockPoolUsed(String bpid) throws IOException {
       return getBPStorage(bpid).getUsed();
     }
-    
+
     int getNumFailedVolumes() {
       return 0;
     }
@@ -467,13 +469,13 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         getBPStorage(bpid).alloc(amount);
         return true;
       }
-      return false;    
+      return false;
     }
-    
+
     synchronized void free(String bpid, long amount) throws IOException {
       getBPStorage(bpid).free(amount);
     }
-    
+
     SimulatedStorage(long cap, DatanodeStorage.State state,
         FileIoProvider fileIoProvider, Configuration conf) {
       capacity = cap;
@@ -484,7 +486,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
           DataNodeVolumeMetrics.create(conf, dnStorage.getStorageID());
       this.volume = new SimulatedVolume(this, fileIoProvider, volumeMetrics);
     }
-    
+
     synchronized void addBlockPool(String bpid) {
       SimulatedBPStorage bpStorage = map.get(bpid);
       if (bpStorage != null) {
@@ -492,11 +494,11 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       }
       map.put(bpid, new SimulatedBPStorage());
     }
-    
+
     synchronized void removeBlockPool(String bpid) {
       map.remove(bpid);
     }
-    
+
     private SimulatedBPStorage getBPStorage(String bpid) throws IOException {
       SimulatedBPStorage bpStorage = map.get(bpid);
       if (bpStorage == null) {
@@ -508,7 +510,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     String getStorageUuid() {
       return dnStorage.getStorageID();
     }
-    
+
     DatanodeStorage getDnStorage() {
       return dnStorage;
     }
@@ -531,7 +533,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       return bpStorage.getBlockMap();
     }
   }
-  
+
   static class SimulatedVolume implements FsVolumeSpi {
     private final SimulatedStorage storage;
     private final FileIoProvider fileIoProvider;
@@ -635,10 +637,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
-    public LinkedList<ScanInfo> compileReport(String bpid,
-        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+    public void compileReport(String bpid,
+        Collection<ScanInfo> report, ReportCompiler reportCompiler)
         throws InterruptedException, IOException {
-      return null;
     }
 
     @Override
@@ -661,7 +662,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private final List<SimulatedStorage> storages;
   private final String datanodeUuid;
   private final DataNode datanode;
-  
 
   public SimulatedFSDataset(DataStorage storage, Configuration conf) {
     this(null, storage, conf);
@@ -724,7 +724,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   private SimulatedStorage getStorage(Block b) {
     return storages.get(LongMath.mod(b.getBlockId(), storages.size()));
   }
-  
+
   /**
    * Get the block map that a given block lives within, assuming it is within
    * block pool bpid.
@@ -792,12 +792,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FsDatasetSpi
   public List<Long> getCacheReport(String bpid) {
-    return new LinkedList<Long>();
+    return Collections.emptyList();
   }
 
   @Override // FSDatasetMBean
   public long getCapacity() {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getCapacity();
     }
@@ -806,7 +806,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FSDatasetMBean
   public long getDfsUsed() {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getUsed();
     }
@@ -815,17 +815,16 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getBlockPoolUsed(bpid);
     }
     return total;
   }
-  
+
   @Override // FSDatasetMBean
   public long getRemaining() {
-
-    long total = 0;
+    long total = 0L;
     for (SimulatedStorage storage : storages) {
       total += storage.getFree();
     }
@@ -834,7 +833,6 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FSDatasetMBean
   public int getNumFailedVolumes() {
-
     int total = 0;
     for (SimulatedStorage storage : storages) {
       total += storage.getNumFailedVolumes();
@@ -849,12 +847,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FSDatasetMBean
   public long getLastVolumeFailureDate() {
-    return 0;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getEstimatedCapacityLostTotal() {
-    return 0;
+    return 0L;
   }
 
   @Override // FsDatasetSpi
@@ -864,27 +862,27 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FSDatasetMBean
   public long getCacheUsed() {
-    return 0l;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getCacheCapacity() {
-    return 0l;
+    return 0L;
   }
 
   @Override // FSDatasetMBean
   public long getNumBlocksCached() {
-    return 0l;
+    return 0L;
   }
 
   @Override
   public long getNumBlocksFailedToCache() {
-    return 0l;
+    return 0L;
   }
 
   @Override
   public long getNumBlocksFailedToUncache() {
-    return 0l;
+    return 0L;
   }
 
   /**
@@ -922,7 +920,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
   }
 
-  @Override 
+  @Override
   public synchronized String getReplicaString(String bpid, long blockId) {
     Replica r = null;
     try {
@@ -931,7 +929,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     } catch (IOException ioe) {
       // Ignore
     }
-    return r == null? "null": r.toString();
+    return Objects.toString(r);
   }
 
   @Override // FsDatasetSpi
@@ -1011,21 +1009,21 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   /**
    * Check if a block is valid.
    *
-   * @param b           The block to check.
-   * @param minLength   The minimum length that the block must have.  May be 0.
-   * @param state       If this is null, it is ignored.  If it is non-null, we
-   *                        will check that the replica has this state.
+   * @param b The block to check.
+   * @param minLength The minimum length that the block must have. May be 0.
+   * @param state If this is null, it is ignored. If it is non-null, we will
+   *          check that the replica has this state.
    *
-   * @throws ReplicaNotFoundException          If the replica is not found
+   * @throws ReplicaNotFoundException If the replica is not found
    *
-   * @throws UnexpectedReplicaStateException   If the replica is not in the 
-   *                                             expected state.
+   * @throws UnexpectedReplicaStateException If the replica is not in the
+   *           expected state.
    */
   @Override // {@link FsDatasetSpi}
   public void checkBlock(ExtendedBlock b, long minLength, ReplicaState state)
       throws ReplicaNotFoundException, UnexpectedReplicaStateException {
     final BInfo binfo = getBInfo(b);
-    
+
     if (binfo == null) {
       throw new ReplicaNotFoundException(b);
     }
@@ -1108,7 +1106,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     map.put(binfo.theBlock, binfo);
     return binfo;
   }
-  
+
   @Override // FsDatasetSpi
   public synchronized ReplicaHandler recoverRbw(
       ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
@@ -1157,12 +1155,11 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       throws IOException {
     BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
     if (binfo == null) {
-      throw new IOException("No such Block " + b );  
+      throw new IOException("No such Block " + b);
     }
-    
     return binfo.getIStream();
   }
-  
+
   @Override // FsDatasetSpi
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
@@ -1183,10 +1180,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       ) throws IOException {
     BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
     if (binfo == null) {
-      throw new IOException("No such Block " + b );  
+      throw new IOException("No such Block " + b);
     }
     if (!binfo.finalized) {
-      throw new IOException("Block " + b + 
+      throw new IOException("Block " + b +
           " is being written, its meta cannot be read");
     }
     final SimulatedInputStream sin = binfo.getMetaIStream();
@@ -1199,23 +1196,20 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override // FsDatasetSpi
   public synchronized void adjustCrcChannelPosition(ExtendedBlock b,
-                                              ReplicaOutputStreams stream, 
-                                              int checksumSize)
-                                              throws IOException {
+      ReplicaOutputStreams stream, int checksumSize) throws IOException {
   }
 
-  /** 
-   * Simulated input and output streams
-   *
+  /**
+   * Simulated input and output streams.
    */
   static private class SimulatedInputStream extends java.io.InputStream {
     final long length; // bytes
     int currentPos = 0;
     byte[] data = null;
     Block theBlock = null;
-    
+
     /**
-     * An input stream of size l with repeated bytes
+     * An input stream of size l with repeated bytes.
      * @param l size of the stream
      * @param iRepeatedData byte that is repeated in the stream
      */
@@ -1223,7 +1217,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       length = l;
       theBlock = b;
     }
-    
+
     /**
      * An input stream of of the supplied data
      * @param iData data to construct the stream
@@ -1232,7 +1226,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       data = iData;
       length = data.length;
     }
-    
+
     /**
      * @return the lenght of the input stream
      */
@@ -1251,10 +1245,9 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         return simulatedByte(theBlock, currentPos++) & BYTE_MASK;
       }
     }
-    
-    @Override
-    public int read(byte[] b) throws IOException { 
 
+    @Override
+    public int read(byte[] b) throws IOException {
       if (b == null) {
         throw new NullPointerException();
       }
@@ -1276,23 +1269,22 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       return bytesRead;
     }
   }
-  
+
   /**
   * This class implements an output stream that merely throws its data away, but records its
    * length.
-   *
    */
   static private class SimulatedOutputStream extends OutputStream {
     long length = 0;
-    
+
     /**
      * constructor for Simulated Output Steram
      */
     SimulatedOutputStream() {
     }
-    
+
     /**
-     * 
+     *
      * @return the length of the data created so far.
      */
     long getLength() {
@@ -1304,29 +1296,25 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     void setLength(long length) {
       this.length = length;
     }
-    
+
     @Override
     public void write(int arg0) throws IOException {
       length++;
     }
-    
+
     @Override
     public void write(byte[] b) throws IOException {
       length += b.length;
     }
-    
+
     @Override
-    public void write(byte[] b,
-              int off,
-              int len) throws IOException  {
+    public void write(byte[] b, int off, int len) throws IOException {
       length += len;
     }
   }
-  
-  private ObjectName mbeanName;
 
+  private ObjectName mbeanName;
 
-  
   /**
    * Register the FSDataset MBean using the name
    *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
@@ -1335,7 +1323,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
    */
   void registerMBean(final String storageId) {
     // We wrap to bypass standard mbean naming convetion.
-    // This wraping can be removed in java 6 as it is more flexible in 
+    // This wraping can be removed in java 6 as it is more flexible in
     // package naming for mbeans and their impl.
     StandardMBean bean;
 
@@ -1346,7 +1334,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     } catch (NotCompliantMBeanException e) {
       DataNode.LOG.warn("Error registering FSDatasetState MBean", e);
     }
- 
+
     DataNode.LOG.info("Registered FSDatasetState MBean");
   }
 
@@ -1359,7 +1347,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public String getStorageInfo() {
     return "Simulated FSDataset-" + datanodeUuid;
   }
-  
+
   @Override
   public boolean hasEnoughResource() {
     return true;
@@ -1371,12 +1359,12 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     ExtendedBlock b = rBlock.getBlock();
     BInfo binfo = getBlockMap(b).get(b.getLocalBlock());
     if (binfo == null) {
-      throw new IOException("No such Block " + b );  
+      throw new IOException("No such Block " + b);
     }
 
-    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(), 
-        binfo.getGenerationStamp(), 
-        binfo.isFinalized()?ReplicaState.FINALIZED : ReplicaState.RBW);
+    return new ReplicaRecoveryInfo(binfo.getBlockId(), binfo.getBytesOnDisk(),
+        binfo.getGenerationStamp(),
+        binfo.isFinalized() ? ReplicaState.FINALIZED : ReplicaState.RBW);
   }
 
   @Override // FsDatasetSpi
@@ -1398,14 +1386,14 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       storage.addBlockPool(bpid);
     }
   }
-  
+
   @Override // FsDatasetSpi
   public void shutdownBlockPool(String bpid) {
     for (SimulatedStorage storage : storages) {
       storage.removeBlockPool(bpid);
     }
   }
-  
+
   @Override // FsDatasetSpi
   public void deleteBlockPool(String bpid, boolean force) {
      return;
@@ -1535,17 +1523,17 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     // TODO Auto-generated method stub
     return null;
   }
-  
+
   @Override
   public void setPinning(ExtendedBlock b) throws IOException {
     getBlockMap(b).get(b.getLocalBlock()).pinned = true;
   }
-  
+
   @Override
   public boolean getPinning(ExtendedBlock b) throws IOException {
     return getBlockMap(b).get(b.getLocalBlock()).pinned;
   }
-  
+
   @Override
   public boolean isDeletingBlock(String bpid, long blockId) {
     throw new UnsupportedOperationException();
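
Beyond whitespace cleanup, the SimulatedFSDataset hunks adopt two small JDK idioms. A sketch of both (the class name is illustrative, not from the patch):

import java.util.Collections;
import java.util.List;
import java.util.Objects;

public class IdiomSketch {
  // Objects.toString(r) returns "null" for a null reference, replacing the
  // manual r == null ? "null" : r.toString() conditional.
  static String replicaString(Object replica) {
    return Objects.toString(replica);
  }

  // Collections.emptyList() hands back a shared immutable instance instead
  // of allocating a fresh LinkedList for an always-empty cache report.
  static List<Long> cacheReport() {
    return Collections.emptyList();
  }

  public static void main(String[] args) {
    System.out.println(replicaString(null)); // prints: null
    System.out.println(cacheReport().size()); // prints: 0
  }
}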

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index e29a147..8ea45c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -35,7 +35,6 @@ import java.nio.channels.FileChannel;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.Executors;
@@ -924,10 +923,9 @@ public class TestDirectoryScanner {
     }
 
     @Override
-    public LinkedList<ScanInfo> compileReport(String bpid,
-        LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
+    public void compileReport(String bpid,
+        Collection<ScanInfo> report, ReportCompiler reportCompiler)
         throws InterruptedException, IOException {
-      return null;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73c660b4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
index ba32834..9d62499 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalVolumeImpl.java
@@ -22,14 +22,14 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.nio.channels.ClosedChannelException;
-import java.util.LinkedList;
+import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
-import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner.ReportCompiler;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
@@ -117,10 +117,8 @@ public class ExternalVolumeImpl implements FsVolumeSpi {
   }
 
   @Override
-  public LinkedList<ScanInfo> compileReport(String bpid,
-      LinkedList<ScanInfo> report, ReportCompiler reportCompiler)
-      throws InterruptedException, IOException {
-    return null;
+  public void compileReport(String bpid, Collection<ScanInfo> report,
+      ReportCompiler reportCompiler) throws InterruptedException, IOException {
   }
 
   @Override

