cndaimin commented on a change in pull request #3593:
URL: https://github.com/apache/hadoop/pull/3593#discussion_r740874125
##########
File path: hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DebugAdmin.java
##########
@@ -387,6 +414,211 @@ int run(List<String> args) throws IOException {
}
}
+ /**
+ * The command for verifying the correctness of erasure coding on an erasure coded file.
+ */
+ private class VerifyECCommand extends DebugCommand {
+ private DFSClient client;
+ private int dataBlkNum;
+ private int parityBlkNum;
+ private int cellSize;
+ private boolean useDNHostname;
+ private CachingStrategy cachingStrategy;
+ private int stripedReadBufferSize;
+ private CompletionService<Integer> readService;
+ private RawErasureDecoder decoder;
+ private BlockReader[] blockReaders;
+
+
+ VerifyECCommand() {
+ super("verifyEC",
+ "verifyEC -file <file>",
+ " Verify HDFS erasure coding on all block groups of the file.");
+ }
+
+ int run(List<String> args) throws IOException {
+ if (args.size() < 2) {
+ System.out.println(usageText);
+ System.out.println(helpText + System.lineSeparator());
+ return 1;
+ }
+ String file = StringUtils.popOptionWithArgument("-file", args);
+ Path path = new Path(file);
+ DistributedFileSystem dfs = AdminHelper.getDFS(getConf());
+ this.client = dfs.getClient();
+
+ FileStatus fileStatus;
+ try {
+ fileStatus = dfs.getFileStatus(path);
+ } catch (FileNotFoundException e) {
+ System.err.println("File " + file + " does not exist.");
+ return 1;
+ }
+
+ if (!fileStatus.isFile()) {
+ System.err.println("File " + file + " is not a regular file.");
+ return 1;
+ }
+ if (!dfs.isFileClosed(path)) {
+ System.err.println("File " + file + " is not closed.");
+ return 1;
+ }
+ this.useDNHostname = getConf().getBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+ DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
+ this.cachingStrategy = CachingStrategy.newDefaultStrategy();
+ this.stripedReadBufferSize = getConf().getInt(
+ DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_KEY,
+ DFSConfigKeys.DFS_DN_EC_RECONSTRUCTION_STRIPED_READ_BUFFER_SIZE_DEFAULT);
+
+ LocatedBlocks locatedBlocks = client.getLocatedBlocks(file, 0, fileStatus.getLen());
+ if (locatedBlocks.getErasureCodingPolicy() == null) {
+ System.err.println("File " + file + " is not erasure coded.");
+ return 1;
+ }
+ ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+ this.dataBlkNum = ecPolicy.getNumDataUnits();
+ this.parityBlkNum = ecPolicy.getNumParityUnits();
+ this.cellSize = ecPolicy.getCellSize();
+ this.decoder = CodecUtil.createRawDecoder(getConf(), ecPolicy.getCodecName(),
+ new ErasureCoderOptions(
+ ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits()));
+ int blockNum = dataBlkNum + parityBlkNum;
+ this.readService = new ExecutorCompletionService<>(
+ DFSUtilClient.getThreadPoolExecutor(blockNum, blockNum, 60,
+ new LinkedBlockingQueue<>(), "read-", false));
+ this.blockReaders = new BlockReader[dataBlkNum + parityBlkNum];
+
+ for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
+ System.out.println("Checking EC block group: blk_" +
locatedBlock.getBlock().getBlockId());
+ LocatedStripedBlock blockGroup = (LocatedStripedBlock) locatedBlock;
+
+ try {
+ verifyBlockGroup(blockGroup);
+ System.out.println("Status: OK");
+ } catch (Exception e) {
+ System.err.println("Status: ERROR, message: " + e.getMessage());
+ return 1;
+ } finally {
+ closeBlockReaders();
+ }
+ }
+ System.out.println("\nAll EC block group status: OK");
+ return 0;
+ }
+
+ private void verifyBlockGroup(LocatedStripedBlock blockGroup) throws Exception {
+ final LocatedBlock[] indexedBlocks = StripedBlockUtil.parseStripedBlockGroup(blockGroup,
+ cellSize, dataBlkNum, parityBlkNum);
+
+ int blockNumExpected = Math.min(dataBlkNum,
+ (int) ((blockGroup.getBlockSize() - 1) / cellSize + 1)) + parityBlkNum;
+ if (blockGroup.getBlockIndices().length < blockNumExpected) {
+ throw new Exception("Block group is under-erasure-coded.");
+ }
+
+ long maxBlockLen = 0L;
+ DataChecksum checksum = null;
+ for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
+ LocatedBlock block = indexedBlocks[i];
+ if (block == null) {
+ blockReaders[i] = null;
+ continue;
+ }
+ if (block.getBlockSize() > maxBlockLen) {
+ maxBlockLen = block.getBlockSize();
+ }
+ BlockReader blockReader = createBlockReader(block.getBlock(),
+ block.getLocations()[0], block.getBlockToken());
+ if (checksum == null) {
+ checksum = blockReader.getDataChecksum();
+ } else {
+ assert checksum.equals(blockReader.getDataChecksum());
+ }
+ blockReaders[i] = blockReader;
+ }
+ assert checksum != null;
+ int bytesPerChecksum = checksum.getBytesPerChecksum();
Review comment:
I think the adjustment based on checksum size is for performance rather than
correctness. The minimum read unit on a DN is the checksum chunk, so aligning
client reads to the checksum boundary avoids wasted IO on the DN side.
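To make the point concrete, here is a minimal, hypothetical sketch of
checksum-chunk alignment; the class and method names are illustrative and are
not part of DebugAdmin.java:

public class ChecksumAlignmentSketch {
  // Round a requested read length up to the next multiple of
  // bytesPerChecksum. A DataNode reads and verifies data in whole
  // checksum chunks, so an aligned request wastes no IO on the DN side.
  static int alignToChecksumChunk(int readLen, int bytesPerChecksum) {
    int remainder = readLen % bytesPerChecksum;
    return remainder == 0 ? readLen : readLen + (bytesPerChecksum - remainder);
  }

  public static void main(String[] args) {
    // With a 512-byte chunk, a 1000-byte read touches two full chunks on
    // the DN anyway, so requesting 1024 bytes costs nothing extra.
    System.out.println(alignToChecksumChunk(1000, 512)); // prints 1024
  }
}

Under that reading, alignment only changes how much buffered data comes back
per request, not whether the verification result is correct.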