Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 190a53b89 -> d22a6a8b8


HDFS-11009. Add a tool to reconstruct block meta file from CLI.

(cherry picked from commit bc4a32aea50e86819730312e89315c0244ce64bf)

Conflicts:
        
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
        
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java

(cherry picked from commit ad7d3c4db8bfab007cc2ec1bad3c388fd7144369)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d22a6a8b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d22a6a8b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d22a6a8b

Branch: refs/heads/branch-2.8
Commit: d22a6a8b82504444b86d95930413978bd3396834
Parents: 190a53b
Author: Xiao Chen <x...@apache.org>
Authored: Tue Oct 18 18:32:27 2016 -0700
Committer: Xiao Chen <x...@apache.org>
Committed: Tue Oct 18 22:42:09 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/client/impl/DfsClientConf.java  |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  46 ++++----
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |  15 +++
 .../apache/hadoop/hdfs/tools/DebugAdmin.java    | 107 +++++++++++++++++--
 .../src/site/markdown/HDFSCommands.md           |  22 +++-
 .../hadoop/hdfs/tools/TestDebugAdmin.java       |  56 +++++++++-
 6 files changed, 209 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index 8848f86..316d374 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -287,7 +287,7 @@ public class DfsClientConf {
     return classes;
   }
 
-  private DataChecksum.Type getChecksumType(Configuration conf) {
+  private static DataChecksum.Type getChecksumType(Configuration conf) {
     final String checksum = conf.get(
         DFS_CHECKSUM_TYPE_KEY,
         DFS_CHECKSUM_TYPE_DEFAULT);
@@ -302,7 +302,7 @@ public class DfsClientConf {
   }
 
   // Construct a checksum option from conf
-  private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
+  public static ChecksumOpt getChecksumOptFromConf(Configuration conf) {
     DataChecksum.Type type = getChecksumType(conf);
     int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
         DFS_BYTES_PER_CHECKSUM_DEFAULT);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 53c6484..d688670 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -756,7 +756,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
     return f;
   }
-  
+
   /**
    * Return the File associated with a block, without first
    * checking that it exists. This should be used when the
@@ -812,7 +812,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
     return info;
   }
-  
+
   /**
    * Get the meta info of a block stored in volumeMap. Block is looked up
    * without matching the generation stamp.
@@ -831,7 +831,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     }
     return info;
   }
-  
+
   /**
    * Returns handles to the block file and its metadata file
    */
@@ -1021,7 +1021,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
    * @param blockFile block file for which the checksum will be computed
    * @throws IOException
    */
-  private static void computeChecksum(File srcMeta, File dstMeta,
+  static void computeChecksum(File srcMeta, File dstMeta,
       File blockFile, int smallBufferSize, final Configuration conf)
       throws IOException {
     final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(srcMeta,
@@ -1086,20 +1086,20 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
           + ") to newlen (=" + newlen + ")");
     }
 
-    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); 
+    DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum();
     int checksumsize = dcs.getChecksumSize();
     int bpc = dcs.getBytesPerChecksum();
     long n = (newlen - 1)/bpc + 1;
     long newmetalen = BlockMetadataHeader.getHeaderSize() + n*checksumsize;
     long lastchunkoffset = (n - 1)*bpc;
-    int lastchunksize = (int)(newlen - lastchunkoffset); 
-    byte[] b = new byte[Math.max(lastchunksize, checksumsize)]; 
+    int lastchunksize = (int)(newlen - lastchunkoffset);
+    byte[] b = new byte[Math.max(lastchunksize, checksumsize)];
 
     RandomAccessFile blockRAF = new RandomAccessFile(blockFile, "rw");
     try {
-      //truncate blockFile 
+      //truncate blockFile
       blockRAF.setLength(newlen);
- 
+
       //read last chunk
       blockRAF.seek(lastchunkoffset);
       blockRAF.readFully(b, 0, lastchunksize);
@@ -1111,7 +1111,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
     dcs.update(b, 0, lastchunksize);
     dcs.writeValue(b, 0, false);
 
-    //update metaFile 
+    //update metaFile
     RandomAccessFile metaRAF = new RandomAccessFile(metaFile, "rw");
     try {
       metaRAF.setLength(newmetalen);
@@ -1358,13 +1358,13 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
   /**
    * Bump a replica's generation stamp to a new one.
    * Its on-disk meta file name is renamed to be the new one too.
-   * 
+   *
    * @param replicaInfo a replica
    * @param newGS new generation stamp
    * @throws IOException if rename fails
    */
-  private void bumpReplicaGS(ReplicaInfo replicaInfo, 
-      long newGS) throws IOException { 
+  private void bumpReplicaGS(ReplicaInfo replicaInfo,
+      long newGS) throws IOException {
     long oldGS = replicaInfo.getGenerationStamp();
     File oldmeta = replicaInfo.getMetaFile();
     replicaInfo.setGenerationStamp(newGS);
@@ -1456,7 +1456,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
       try {
         try(AutoCloseableLock lock = datasetLock.acquire()) {
           ReplicaInfo replicaInfo = getReplicaInfo(b.getBlockPoolId(), 
b.getBlockId());
-          
+
           // check the replica's state
           if (replicaInfo.getState() != ReplicaState.RBW) {
             throw new ReplicaNotFoundException(
@@ -1685,7 +1685,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
       finalizeReplica(b.getBlockPoolId(), replicaInfo);
     }
   }
-  
+
   private FinalizedReplica finalizeReplica(String bpid,
       ReplicaInfo replicaInfo) throws IOException {
     try(AutoCloseableLock lock = datasetLock.acquire()) {
@@ -1945,15 +1945,15 @@ class FsDatasetImpl implements 
FsDatasetSpi<FsVolumeImpl> {
     try(AutoCloseableLock lock = datasetLock.acquire()) {
       f = getFile(bpid, blockId, false);
     }
-    
+
     if(f != null ) {
       if(f.exists())
         return f;
-   
+
       // if file is not null, but doesn't exist - possibly disk failed
       datanode.checkDiskErrorAsync();
     }
-    
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("blockId=" + blockId + ", f=" + f);
     }
@@ -2191,7 +2191,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
       }
       return info.getBlockFile();
     }
-    return null;    
+    return null;
   }
 
   /**
@@ -2344,7 +2344,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
        */
       if (memBlockInfo == null) {
         // Block is missing in memory - add the block to volumeMap
-        ReplicaInfo diskBlockInfo = new FinalizedReplica(blockId, 
+        ReplicaInfo diskBlockInfo = new FinalizedReplica(blockId,
             diskFile.length(), diskGS, vol, diskFile.getParentFile());
         volumeMap.add(bpid, diskBlockInfo);
         if (vol.isTransientStorage()) {
@@ -2854,7 +2854,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
     for (int i = 0; i < curVolumes.size(); i++) {
       blocksVolumeIds.add(ByteBuffer.allocate(4).putInt(i).array());
     }
-    // Determine the index of the VolumeId of each block's volume, by 
comparing 
+    // Determine the index of the VolumeId of each block's volume, by comparing
     // the block's volume against the enumerated volumes
     for (int i = 0; i < blockIds.length; i++) {
       long blockId = blockIds[i];
@@ -3198,7 +3198,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
 
     File f = getBlockFile(block);
     Path p = new Path(f.getAbsolutePath());
-    
+
     FsPermission oldPermission = localFS.getFileStatus(
         new Path(f.getAbsolutePath())).getPermission();
     //sticky bit is used for pinning purpose
@@ -3213,7 +3213,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> 
{
       return  false;
     }
     File f = getBlockFile(block);
-        
+
     FileStatus fss = localFS.getFileStatus(new Path(f.getAbsolutePath()));
     return fss.getPermission().getStickyBit();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
index f695c8c..4af8773 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java
@@ -22,7 +22,9 @@ import java.io.FilenameFilter;
 import java.io.IOException;
 import java.util.Arrays;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
@@ -105,4 +107,17 @@ public class FsDatasetUtil {
           + blockFile + ", metaFile=" + metaFile, nfe);
     }
   }
+
+  /**
+   * Compute the checksum for a block file that does not already have
+   * its checksum computed, and save it to dstMeta file.
+   */
+  public static void computeChecksum(File srcMeta, File dstMeta, File 
blockFile,
+      int smallBufferSize, Configuration conf) throws IOException {
+    Preconditions.checkNotNull(srcMeta);
+    Preconditions.checkNotNull(dstMeta);
+    Preconditions.checkNotNull(blockFile);
+    FsDatasetImpl.computeChecksum(srcMeta, dstMeta, blockFile,
+        smallBufferSize, conf);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
index a2b91ab..e2a6b3e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
@@ -17,7 +17,11 @@
  */
 package org.apache.hadoop.hdfs.tools;
 
+import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -30,14 +34,19 @@ import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.util.concurrent.Uninterruptibles;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
@@ -56,9 +65,10 @@ public class DebugAdmin extends Configured implements Tool {
    * All the debug commands we can run.
    */
   private DebugCommand DEBUG_COMMANDS[] = {
-    new VerifyBlockChecksumCommand(),
-    new RecoverLeaseCommand(),
-    new HelpCommand()
+      new VerifyMetaCommand(),
+      new ComputeMetaCommand(),
+      new RecoverLeaseCommand(),
+      new HelpCommand()
   };
 
   /**
@@ -83,10 +93,10 @@ public class DebugAdmin extends Configured implements Tool {
   /**
    * The command for verifying a block metadata file and possibly block file.
    */
-  private class VerifyBlockChecksumCommand extends DebugCommand {
-    VerifyBlockChecksumCommand() {
-      super("verify",
-"verify -meta <metadata-file> [-block <block-file>]",
+  private class VerifyMetaCommand extends DebugCommand {
+    VerifyMetaCommand() {
+      super("verifyMeta",
+"verifyMeta -meta <metadata-file> [-block <block-file>]",
 "  Verify HDFS metadata and block files.  If a block file is specified, we\n" +
 "  will verify that the checksums in the metadata file match the block\n" +
 "  file.");
@@ -195,6 +205,86 @@ public class DebugAdmin extends Configured implements Tool 
{
   }
 
   /**
+   * The command for verifying a block metadata file and possibly block file.
+   */
+  private class ComputeMetaCommand extends DebugCommand {
+    ComputeMetaCommand() {
+      super("computeMeta",
+          "computeMeta -block <block-file> -out <output-metadata-file>",
+          "  Compute HDFS metadata from the specified block file, and save it"
+              + " to\n  the specified output metadata file.\n\n"
+              + "**NOTE: Use at your own risk!\n If the block file is corrupt"
+              + " and you overwrite it's meta file, \n it will show up"
+              + " as good in HDFS, but you can't read the data.\n"
+              + " Only use as a last measure, and when you are 100% certain"
+              + " the block file is good.");
+    }
+
+    private DataChecksum createChecksum(Options.ChecksumOpt opt) {
+      DataChecksum dataChecksum = DataChecksum
+          .newDataChecksum(opt.getChecksumType(), opt.getBytesPerChecksum());
+      if (dataChecksum == null) {
+        throw new HadoopIllegalArgumentException(
+            "Invalid checksum type: userOpt=" + opt + ", default=" + opt
+                + ", effective=null");
+      }
+      return dataChecksum;
+    }
+
+    int run(List<String> args) throws IOException {
+      if (args.size() == 0) {
+        System.out.println(usageText);
+        System.out.println(helpText + "\n");
+        return 1;
+      }
+      final String name = StringUtils.popOptionWithArgument("-block", args);
+      if (name == null) {
+        System.err.println("You must specify a block file with -block");
+        return 2;
+      }
+      final File blockFile = new File(name);
+      if (!blockFile.exists() || !blockFile.isFile()) {
+        System.err.println("Block file <" + name + "> does not exist "
+            + "or is not a file");
+        return 3;
+      }
+      final String outFile = StringUtils.popOptionWithArgument("-out", args);
+      if (outFile == null) {
+        System.err.println("You must specify a output file with -out");
+        return 4;
+      }
+      final File srcMeta = new File(outFile);
+      if (srcMeta.exists()) {
+        System.err.println("output file already exists!");
+        return 5;
+      }
+
+      DataOutputStream metaOut = null;
+      try {
+        final Configuration conf = new Configuration();
+        final Options.ChecksumOpt checksumOpt =
+            DfsClientConf.getChecksumOptFromConf(conf);
+        final DataChecksum checksum = createChecksum(checksumOpt);
+
+        final int smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
+        metaOut = new DataOutputStream(
+            new BufferedOutputStream(new FileOutputStream(srcMeta),
+                smallBufferSize));
+        BlockMetadataHeader.writeHeader(metaOut, checksum);
+        metaOut.close();
+        FsDatasetUtil.computeChecksum(
+            srcMeta, srcMeta, blockFile, smallBufferSize, conf);
+        System.out.println(
+            "Checksum calculation succeeded on block file " + name
+                + " saved metadata to meta file " + outFile);
+        return 0;
+      } finally {
+        IOUtils.cleanup(null, metaOut);
+      }
+    }
+  }
+
+  /**
    * The command for recovering a file lease.
    */
   private class RecoverLeaseCommand extends DebugCommand {
@@ -353,6 +443,9 @@ public class DebugAdmin extends Configured implements Tool {
 
   private void printUsage() {
     System.out.println("Usage: hdfs debug <command> [arguments]\n");
+    System.out.println("These commands are for advanced users only.\n");
+    System.out.println("Incorrect usages may result in data loss. " +
+        "Use at your own risk.\n");
     for (DebugCommand command : DEBUG_COMMANDS) {
       if (!command.name.equals("help")) {
         System.out.println(command.usageText);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index fa2dae4..e594a59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -46,7 +46,8 @@ HDFS Commands Guide
     * [storagepolicies](#storagepolicies)
     * [zkfc](#zkfc)
 * [Debug Commands](#Debug_Commands)
-    * [verify](#verify)
+    * [verifyMeta](#verifyMeta)
+    * [computeMeta](#computeMeta)
     * [recoverLease](#recoverLease)
 
 Overview
@@ -511,11 +512,11 @@ This command starts a Zookeeper Failover Controller 
process for use with [HDFS H
 Debug Commands
 --------------
 
-Useful commands to help administrators debug HDFS issues, like validating 
block files and calling recoverLease.
+Useful commands to help administrators debug HDFS issues. These commands are 
for advanced users only.
 
-### `verify`
+### `verifyMeta`
 
-Usage: `hdfs debug verify -meta <metadata-file> [-block <block-file>]`
+Usage: `hdfs debug verifyMeta -meta <metadata-file> [-block <block-file>]`
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
@@ -524,6 +525,19 @@ Usage: `hdfs debug verify -meta <metadata-file> [-block 
<block-file>]`
 
 Verify HDFS metadata and block files. If a block file is specified, we will 
verify that the checksums in the metadata file match the block file.
 
+### `computeMeta`
+
+Usage: `hdfs debug computeMeta -block <block-file> -out <output-metadata-file>`
+
+| COMMAND\_OPTION | Description |
+|:---- |:---- |
+| `-block` *block-file* | Absolute path for the block file on the local file 
system of the data node. |
+| `-out` *output-metadata-file* | Absolute path for the output metadata file 
to store the checksum computation result from the block file. |
+
+Compute HDFS metadata from block files. If a block file is specified, we will 
compute the checksums from the block file, and save it to the specified output 
metadata file.
+
+**NOTE**: Use at your own risk! If the block file is corrupt and you overwrite 
its meta file, it will show up as 'good' in HDFS, but you can't read the data. 
Only use as a last measure, and when you are 100% certain the block file is 
good.
+
 ### `recoverLease`
 
 Usage: `hdfs debug recoverLease -path <path> [-retries <num-retries>]`

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d22a6a8b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
index 07f70e0..e8ee9e1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
@@ -40,6 +40,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 public class TestDebugAdmin {
+
+  static private final String TEST_ROOT_DIR =
+      new File(System.getProperty("test.build.data", "/tmp"),
+          TestDebugAdmin.class.getSimpleName()).getAbsolutePath();
+
   private MiniDFSCluster cluster;
   private DistributedFileSystem fs;
   private DebugAdmin admin;
@@ -47,6 +52,9 @@ public class TestDebugAdmin {
 
   @Before
   public void setUp() throws Exception {
+    final File testRoot = new File(TEST_ROOT_DIR);
+    testRoot.delete();
+    testRoot.mkdirs();
     Configuration conf = new Configuration();
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
@@ -94,31 +102,69 @@ public class TestDebugAdmin {
   }
 
   @Test(timeout = 60000)
-  public void testVerifyBlockChecksumCommand() throws Exception {
+  public void testVerifyMetaCommand() throws Exception {
     DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
     FsDatasetSpi<?> fsd = datanode.getFSDataset();
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
     File blockFile = getBlockFile(fsd,
         block.getBlockPoolId(), block.getLocalBlock());
-    assertEquals("ret: 1, You must specify a meta file with -meta",
-        runCmd(new String[]{"verify", "-block", blockFile.getAbsolutePath()}));
+    assertEquals("ret: 1, You must specify a meta file with -meta", runCmd(
+        new String[] {"verifyMeta", "-block", blockFile.getAbsolutePath()}));
     File metaFile = getMetaFile(fsd,
         block.getBlockPoolId(), block.getLocalBlock());
     assertEquals("ret: 0, Checksum type: " +
           "DataChecksum(type=CRC32C, chunkSize=512)",
-        runCmd(new String[]{"verify",
+        runCmd(new String[]{"verifyMeta",
             "-meta", metaFile.getAbsolutePath()}));
     assertEquals("ret: 0, Checksum type: " +
           "DataChecksum(type=CRC32C, chunkSize=512)" +
           "Checksum verification succeeded on block file " +
           blockFile.getAbsolutePath(),
-        runCmd(new String[]{"verify",
+        runCmd(new String[]{"verifyMeta",
             "-meta", metaFile.getAbsolutePath(),
             "-block", blockFile.getAbsolutePath()})
     );
   }
 
   @Test(timeout = 60000)
+  public void testComputeMetaCommand() throws Exception {
+    DFSTestUtil.createFile(fs, new Path("/bar"), 1234, (short) 1, 0xdeadbeef);
+    FsDatasetSpi<?> fsd = datanode.getFSDataset();
+    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
+    File blockFile = getBlockFile(fsd,
+        block.getBlockPoolId(), block.getLocalBlock());
+
+    assertEquals("ret: 1, computeMeta -block <block-file> -out "
+            + "<output-metadata-file>  Compute HDFS metadata from the 
specified"
+            + " block file, and save it to  the specified output metadata 
file."
+            + "**NOTE: Use at your own risk! If the block file is corrupt"
+            + " and you overwrite it's meta file,  it will show up"
+            + " as good in HDFS, but you can't read the data."
+            + " Only use as a last measure, and when you are 100% certain"
+            + " the block file is good.",
+        runCmd(new String[] {"computeMeta"}));
+    assertEquals("ret: 2, You must specify a block file with -block",
+        runCmd(new String[] {"computeMeta", "-whatever"}));
+    assertEquals("ret: 3, Block file <bla> does not exist or is not a file",
+        runCmd(new String[] {"computeMeta", "-block", "bla"}));
+    assertEquals("ret: 4, You must specify a output file with -out", runCmd(
+        new String[] {"computeMeta", "-block", blockFile.getAbsolutePath()}));
+    assertEquals("ret: 5, output file already exists!", runCmd(
+        new String[] {"computeMeta", "-block", blockFile.getAbsolutePath(),
+            "-out", blockFile.getAbsolutePath()}));
+
+    File outFile = new File(TEST_ROOT_DIR, "out.meta");
+    outFile.delete();
+    assertEquals("ret: 0, Checksum calculation succeeded on block file " +
+        blockFile.getAbsolutePath() + " saved metadata to meta file " +
+        outFile.getAbsolutePath(), runCmd(new String[] {"computeMeta", 
"-block",
+        blockFile.getAbsolutePath(), "-out", outFile.getAbsolutePath()}));
+
+    assertTrue(outFile.exists());
+    assertTrue(outFile.length() > 0);
+  }
+
+  @Test(timeout = 60000)
   public void testRecoverLeaseforFileNotFound() throws Exception {
     assertTrue(runCmd(new String[] {
         "recoverLease", "-path", "/foo", "-retries", "2" }).contains(


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to