This is an automated email from the ASF dual-hosted git repository.

vinayakumarb pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 7806403  HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.
7806403 is described below

commit 7806403842ddd0f5b339e3dca42688b970cae267
Author: Vinayakumar B <vinayakum...@apache.org>
AuthorDate: Tue Feb 12 21:57:57 2019 +0530

    HDFS-14266. EC : Fsck -blockId shows null for EC Blocks if One Block Is Not Available. Contributed by Ayush Saxena.
---
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java  | 56 ++++++++++++++--------
 .../blockmanagement/TestBlockInfoStriped.java      | 45 +++++++++++++++++
 2 files changed, 80 insertions(+), 21 deletions(-)
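
A note on the likely root cause, inferred from the patch below rather than
stated in it: a striped BlockInfo keeps one storage slot per internal block
index, so a missing internal block leaves a null slot instead of compacting
the array the way a contiguous block does. The old fsck loop walked
numNodes() slots, could hand that null DatanodeDescriptor to the printing
code, and the resulting NullPointerException was swallowed by the catch
block, which prints e.getMessage() -- on the JDK 8 runtimes Hadoop targeted
at the time, a bare NPE's message is null, which PrintStream renders as the
literal "null" in the report. A minimal, hypothetical demo of that failure
mode (not part of the patch):

    import java.io.PrintStream;

    public class NullMessageDemo {
      public static void main(String[] args) {
        PrintStream out = System.out;  // stands in for NamenodeFsck's 'out'
        try {
          String host = null;          // stands in for the null DatanodeDescriptor
          out.print("Block replica on datanode/rack: " + host.length());
        } catch (Exception e) {
          // On JDK 8 a bare NPE carries a null message, and
          // PrintStream.println(String) prints null as the literal "null".
          out.println(e.getMessage());
        }
      }
    }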

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 9c39d86..ad8aaaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -314,29 +314,22 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       if (blockManager.getCorruptReplicas(block) != null) {
         corruptionRecord = blockManager.getCorruptReplicas(block);
       }
-
-      //report block replicas status on datanodes
-      for(int idx = (blockInfo.numNodes()-1); idx >= 0; idx--) {
-        DatanodeDescriptor dn = blockInfo.getDatanode(idx);
-        out.print("Block replica on datanode/rack: " + dn.getHostName() +
-            dn.getNetworkLocation() + " ");
-        if (corruptionRecord != null && corruptionRecord.contains(dn)) {
-          out.print(CORRUPT_STATUS + "\t ReasonCode: " +
-              blockManager.getCorruptReason(block, dn));
-        } else if (dn.isDecommissioned() ){
-          out.print(DECOMMISSIONED_STATUS);
-        } else if (dn.isDecommissionInProgress()) {
-          out.print(DECOMMISSIONING_STATUS);
-        } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
-          out.print(ENTERING_MAINTENANCE_STATUS);
-        } else if (this.showMaintenanceState && dn.isInMaintenance()) {
-          out.print(IN_MAINTENANCE_STATUS);
-        } else {
-          out.print(HEALTHY_STATUS);
+      // report block replicas status on datanodes
+      if (blockInfo.isStriped()) {
+        for (int idx = (blockInfo.getCapacity() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          if (dn == null) {
+            continue;
+          }
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
+        }
+      } else {
+        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
+          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
+          printDatanodeReplicaStatus(block, corruptionRecord, dn);
         }
-        out.print("\n");
       }
-    } catch (Exception e){
+    } catch (Exception e) {
       String errMsg = "Fsck on blockId '" + blockId;
       LOG.warn(errMsg, e);
       out.println(e.getMessage());
@@ -347,6 +340,27 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     }
   }
 
+  private void printDatanodeReplicaStatus(Block block,
+      Collection<DatanodeDescriptor> corruptionRecord, DatanodeDescriptor dn) {
+    out.print("Block replica on datanode/rack: " + dn.getHostName() +
+        dn.getNetworkLocation() + " ");
+    if (corruptionRecord != null && corruptionRecord.contains(dn)) {
+      out.print(CORRUPT_STATUS + "\t ReasonCode: " +
+          blockManager.getCorruptReason(block, dn));
+    } else if (dn.isDecommissioned()) {
+      out.print(DECOMMISSIONED_STATUS);
+    } else if (dn.isDecommissionInProgress()) {
+      out.print(DECOMMISSIONING_STATUS);
+    } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
+      out.print(ENTERING_MAINTENANCE_STATUS);
+    } else if (this.showMaintenanceState && dn.isInMaintenance()) {
+      out.print(IN_MAINTENANCE_STATUS);
+    } else {
+      out.print(HEALTHY_STATUS);
+    }
+    out.print("\n");
+  }
+
   /**
    * Check files on DFS, starting from the indicated path.
    */
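
The fix hinges on the difference between numNodes() and getCapacity() on a
striped block: assuming the usual BlockInfoStriped semantics, numNodes()
counts only the non-null slots, while getCapacity() is the fixed size of the
slot array (one slot per internal block). A hypothetical helper showing the
corrected traversal, with illustrative RS(6,3) numbers:

    import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

    // Hypothetical sketch, not from the patch. For RS(6,3), getCapacity()
    // is 9 (6 data + 3 parity); with one internal block unreported,
    // numNodes() is 8, so a loop bounded by numNodes() would both hit the
    // null slot and never reach a real storage in the last slot.
    final class StripedSlots {
      static void forEachReportedStorage(BlockInfoStriped blockInfo) {
        for (int idx = blockInfo.getCapacity() - 1; idx >= 0; idx--) {
          DatanodeDescriptor dn = blockInfo.getDatanode(idx);
          if (dn == null) {
            continue;  // no replica reported for this internal block index
          }
          System.out.println("slot " + idx + " -> " + dn.getHostName());
        }
      }
    }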
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
index d20d2fd..878edf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfoStriped.java
@@ -17,11 +17,19 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.StripedFileTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.tools.DFSck;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -31,12 +39,15 @@ import org.junit.runners.Parameterized;
 
 import java.io.DataOutput;
 import java.io.DataOutputStream;
+import java.io.File;
+import java.io.PrintStream;
 import java.io.ByteArrayOutputStream;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
 
 /**
@@ -213,6 +224,40 @@ public class TestBlockInfoStriped {
   }
 
   @Test
+  public void testGetBlockInfo() throws Exception {
+    int dataBlocks = testECPolicy.getNumDataUnits();
+    int parityBlocks = testECPolicy.getNumParityUnits();
+    int totalSize = dataBlocks + parityBlocks;
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(totalSize)
+            .build()) {
+      DistributedFileSystem fs = cluster.getFileSystem();
+      fs.enableErasureCodingPolicy(
+          StripedFileTestUtil.getDefaultECPolicy().getName());
+      fs.enableErasureCodingPolicy(testECPolicy.getName());
+      fs.mkdirs(new Path("/ecDir"));
+      fs.setErasureCodingPolicy(new Path("/ecDir"), testECPolicy.getName());
+      DFSTestUtil.createFile(fs, new Path("/ecDir/ecFile"),
+          fs.getDefaultBlockSize() * dataBlocks, (short) 1, 1024);
+      ExtendedBlock blk = DFSTestUtil
+          .getAllBlocks(fs, new Path("/ecDir/ecFile")).get(0).getBlock();
+      String id = "blk_" + Long.toString(blk.getBlockId());
+      BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+          .getStoredBlock(blk.getLocalBlock());
+      DatanodeStorageInfo[] dnStorageInfo = cluster.getNameNode()
+          .getNamesystem().getBlockManager().getStorages(bInfo);
+      bInfo.removeStorage(dnStorageInfo[1]);
+      ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+      PrintStream out = new PrintStream(bStream, true);
+      assertEquals(0, ToolRunner.run(new DFSck(conf, out), new String[] {
+          new Path("/ecDir/ecFile").toString(), "-blockId", id }));
+      assertFalse(bStream.toString().contains("null"));
+    }
+  }
+
+  @Test
   public void testWrite() {
     long blkID = 1;
     long numBytes = 1;
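
For completeness, the essence of the new regression test, condensed (names
as in the patch): knock one storage out of the striped block so a null slot
appears, run fsck -blockId against the file, and require that the report
never contains the literal "null". Note that it is the captured
ByteArrayOutputStream, not the PrintStream, whose contents must be
inspected, since PrintStream does not override toString().

    // Condensed sketch of the assertion flow in testGetBlockInfo() above.
    bInfo.removeStorage(dnStorageInfo[1]);             // leaves a null slot
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    assertEquals(0, ToolRunner.run(new DFSck(conf, out),
        new String[] {"/ecDir/ecFile", "-blockId", id}));
    assertFalse(bStream.toString().contains("null"));  // inspect the buffer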

