[ https://issues.apache.org/jira/browse/HDFS-14768?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16915527#comment-16915527 ]
Hadoop QA commented on HDFS-14768:
----------------------------------
| (x) *{color:red}-1 overall{color}* |
\\
\\
|| Vote || Subsystem || Runtime || Comment ||
| {color:blue}0{color} | {color:blue} reexec {color} | {color:blue} 0m 20s{color} | {color:blue} Docker mode activated. {color} |
|| || || || {color:brown} Prechecks {color} ||
| {color:green}+1{color} | {color:green} @author {color} | {color:green} 0m 0s{color} | {color:green} The patch does not contain any @author tags. {color} |
| {color:red}-1{color} | {color:red} test4tests {color} | {color:red} 0m 0s{color} | {color:red} The patch doesn't appear to include any new or modified tests. Please justify why no new tests are needed for this patch. Also please list what manual steps were performed to verify this patch. {color} |
|| || || || {color:brown} trunk Compile Tests {color} ||
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 20m 7s{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 1m 13s{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} checkstyle {color} | {color:green} 0m 48s{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green} 1m 23s{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} shadedclient {color} | {color:green} 15m 49s{color} | {color:green} branch has no errors when building and testing our client artifacts. {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green} 2m 52s{color} | {color:green} trunk passed {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 1m 3s{color} | {color:green} trunk passed {color} |
|| || || || {color:brown} Patch Compile Tests {color} ||
| {color:green}+1{color} | {color:green} mvninstall {color} | {color:green} 1m 16s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} compile {color} | {color:green} 1m 9s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} javac {color} | {color:green} 1m 9s{color} | {color:green} the patch passed {color} |
| {color:orange}-0{color} | {color:orange} checkstyle {color} | {color:orange} 0m 50s{color} | {color:orange} hadoop-hdfs-project/hadoop-hdfs: The patch generated 2 new + 111 unchanged - 1 fixed = 113 total (was 112) {color} |
| {color:green}+1{color} | {color:green} mvnsite {color} | {color:green} 1m 21s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} whitespace {color} | {color:green} 0m 0s{color} | {color:green} The patch has no whitespace issues. {color} |
| {color:green}+1{color} | {color:green} shadedclient {color} | {color:green} 13m 57s{color} | {color:green} patch has no errors when building and testing our client artifacts. {color} |
| {color:green}+1{color} | {color:green} findbugs {color} | {color:green} 2m 47s{color} | {color:green} the patch passed {color} |
| {color:green}+1{color} | {color:green} javadoc {color} | {color:green} 1m 3s{color} | {color:green} the patch passed {color} |
|| || || || {color:brown} Other Tests {color} ||
| {color:red}-1{color} | {color:red} unit {color} | {color:red} 120m 4s{color} | {color:red} hadoop-hdfs in the patch failed. {color} |
| {color:green}+1{color} | {color:green} asflicense {color} | {color:green} 0m 31s{color} | {color:green} The patch does not generate ASF License warnings. {color} |
| {color:black}{color} | {color:black} {color} | {color:black} 186m 44s{color} | {color:black} {color} |
\\
\\
|| Reason || Tests ||
| Failed junit tests | hadoop.hdfs.TestEncryptionZonesWithKMS |
| | hadoop.hdfs.shortcircuit.TestShortCircuitCache |
| | hadoop.hdfs.TestDFSStripedOutputStream |
| | hadoop.hdfs.TestReadStripedFileWithMissingBlocks |
| | hadoop.hdfs.TestQuota |
| | hadoop.hdfs.TestFileChecksum |
| | hadoop.hdfs.TestEncryptedTransfer |
| | hadoop.hdfs.server.namenode.TestReconstructStripedBlocks |
| | hadoop.hdfs.TestPersistBlocks |
| | hadoop.hdfs.TestWriteReadStripedFile |
| | hadoop.hdfs.TestFileCorruption |
| | hadoop.hdfs.TestFileChecksumCompositeCrc |
| | hadoop.hdfs.TestErasureCodingPolicies |
| | hadoop.hdfs.qjournal.client.TestQJMWithFaults |
| | hadoop.hdfs.shortcircuit.TestShortCircuitLocalRead |
\\
\\
|| Subsystem || Report/Notes ||
| Docker | Client=19.03.1 Server=19.03.1 Image:yetus/hadoop:bdbca0e53b4 |
| JIRA Issue | HDFS-14768 |
| JIRA Patch URL | https://issues.apache.org/jira/secure/attachment/12978539/HDFS-14768.000.patch |
| Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient findbugs checkstyle |
| uname | Linux ed7a9ccc2cf8 4.15.0-54-generic #58-Ubuntu SMP Mon Jun 24 10:55:24 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux |
| Build tool | maven |
| Personality | /testptch/patchprocess/precommit/personality/provided.sh |
| git revision | trunk / d2225c8 |
| maven | version: Apache Maven 3.3.9 |
| Default Java | 1.8.0_222 |
| findbugs | v3.1.0-RC1 |
| checkstyle | https://builds.apache.org/job/PreCommit-HDFS-Build/27670/artifact/out/diff-checkstyle-hadoop-hdfs-project_hadoop-hdfs.txt |
| unit | https://builds.apache.org/job/PreCommit-HDFS-Build/27670/artifact/out/patch-unit-hadoop-hdfs-project_hadoop-hdfs.txt |
| Test Results | https://builds.apache.org/job/PreCommit-HDFS-Build/27670/testReport/ |
| Max. process+thread count | 3055 (vs. ulimit of 5500) |
| modules | C: hadoop-hdfs-project/hadoop-hdfs U: hadoop-hdfs-project/hadoop-hdfs |
| Console output | https://builds.apache.org/job/PreCommit-HDFS-Build/27670/console |
| Powered by | Apache Yetus 0.8.0 http://yetus.apache.org |
This message was automatically generated.
> In some cases, erasure-coded blocks are corrupted when they are reconstructed.
> ------------------------------------------------------------------------
>
> Key: HDFS-14768
> URL: https://issues.apache.org/jira/browse/HDFS-14768
> Project: Hadoop HDFS
> Issue Type: Bug
> Components: datanode, erasure-coding, hdfs, namenode
> Affects Versions: 3.0.2
> Reporter: guojh
> Priority: Major
> Labels: patch
> Fix For: 3.3.0
>
> Attachments: HDFS-14768.000.patch
>
>
> The policy is RS-6-3-1024k; the Hadoop version is 3.0.2.
> Suppose a file's block group has internal block indices [0,1,2,3,4,5,6,7,8].
> We decommission the datanodes holding indices [3,4], and we increase the
> index-6 datanode's pendingReplicationWithoutTargets so that it exceeds
> replicationStreamsHardLimit (we set it to 14). Then, after
> BlockManager#chooseSourceDatanodes runs, liveBlockIndices is
> [0,1,2,3,4,5,7,8] and the block counters are Live: 7, Decommissioning: 2.
> In BlockManager#scheduleReconstruction, additionalReplRequired is
> 9 - 7 = 2. After the NameNode chooses two target datanodes, it assigns an
> erasure coding reconstruction task to them.
> When a datanode receives the task, it builds targetIndices from
> liveBlockIndices and the number of targets. The code is below.
> {code:java}
> // targetIndices is allocated with one slot per chosen target, zero-filled.
> targetIndices = new short[targets.length];
>
> private void initTargetIndices() {
>   BitSet bitset = reconstructor.getLiveBitSet();
>   int m = 0;
>   hasValidTargets = false;
>   for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
>     // A cleared bit marks an internal block that is not live.
>     if (!bitset.get(i)) {
>       if (reconstructor.getBlockLen(i) > 0) {
>         if (m < targets.length) {
>           targetIndices[m++] = (short) i;
>           hasValidTargets = true;
>         }
>       }
>     }
>   }
> }
> {code}
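> To see what this loop produces in the scenario above, here is a minimal,
> self-contained sketch (hypothetical standalone code, not the actual Hadoop
> classes): only internal block 6 is missing, but two targets were allocated,
> so the second slot of targetIndices keeps its zero default.
> {code:java}
> import java.util.Arrays;
> import java.util.BitSet;
>
> public class TargetIndicesDemo {
>   public static void main(String[] args) {
>     int dataBlkNum = 6;
>     int parityBlkNum = 3;
>     // liveBlockIndices from the NameNode: [0,1,2,3,4,5,7,8] (6 is missing).
>     BitSet liveBitSet = new BitSet(dataBlkNum + parityBlkNum);
>     for (int i : new int[] {0, 1, 2, 3, 4, 5, 7, 8}) {
>       liveBitSet.set(i);
>     }
>     // The NameNode allocated two targets (additionalReplRequired = 9 - 7 = 2).
>     // For simplicity, assume every internal block has a non-zero length.
>     short[] targetIndices = new short[2];
>     int m = 0;
>     for (int i = 0; i < dataBlkNum + parityBlkNum; i++) {
>       if (!liveBitSet.get(i) && m < targetIndices.length) {
>         targetIndices[m++] = (short) i;
>       }
>     }
>     // Only one missing index exists, so targetIndices[1] is never written.
>     // Prints [6, 0]: healthy internal block 0 is wrongly treated as a target.
>     System.out.println(Arrays.toString(targetIndices));
>   }
> }
> {code}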
> targetIndices[0] = 6, and targetIndices[1] is always 0, left over from the
> array's initial value.
> The StripedReader always creates readers from the first 6 live block
> indices, i.e. [0,1,2,3,4,5].
> Using source indices [0,1,2,3,4,5] to reconstruct target indices [6,0]
> triggers the ISA-L bug: the reconstructed data for block index 6 is
> corrupted (all zeros).
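> Until the fix lands, a defensive check along these lines (a hypothetical
> sketch, not code from the attached patch) would catch the bad task before
> the decoder runs, since a live index must never appear in both the source
> set and the target set:
> {code:java}
> import java.util.HashSet;
> import java.util.Set;
>
> final class ReconstructionSanity {
>   // Hypothetical guard, not from HDFS-14768.000.patch: reject a
>   // reconstruction task whose target indices overlap its source indices.
>   static void checkSourcesAndTargetsDisjoint(short[] srcIndices,
>       short[] targetIndices) {
>     Set<Short> sources = new HashSet<>();
>     for (short s : srcIndices) {
>       sources.add(s);
>     }
>     for (short t : targetIndices) {
>       if (sources.contains(t)) {
>         throw new IllegalStateException("Target index " + t
>             + " is also a source; decoding would corrupt the output");
>       }
>     }
>   }
>
>   public static void main(String[] args) {
>     // The faulty task from this issue: sources [0..5], targets [6, 0].
>     checkSourcesAndTargetsDisjoint(
>         new short[] {0, 1, 2, 3, 4, 5}, new short[] {6, 0});
>   }
> }
> {code}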
> I wrote a unit test that reproduces this reliably.
> {code:java}
> public void testFileDecommission() throws Exception {
>   LOG.info("Starting test testFileDecommission");
>   final Path ecFile = new Path(ecDir, "testFileDecommission");
>   int writeBytes = cellSize * dataBlocks;
>   writeStripedFile(dfs, ecFile, writeBytes);
>   Assert.assertEquals(0, bm.numOfUnderReplicatedBlocks());
>   FileChecksum fileChecksum1 = dfs.getFileChecksum(ecFile, writeBytes);
>   LocatedBlocks locatedBlocks =
>       StripedFileTestUtil.getLocatedBlocks(ecFile, dfs);
>   LocatedBlock lb = dfs.getClient().getLocatedBlocks(ecFile.toString(), 0)
>       .get(0);
>   DatanodeInfo[] dnLocs = lb.getLocations();
>   LocatedStripedBlock lastBlock =
>       (LocatedStripedBlock) locatedBlocks.getLastLocatedBlock();
>   DatanodeInfo[] storageInfos = lastBlock.getLocations();
>   // Push the index-6 datanode past replicationStreamsHardLimit so that it
>   // is excluded as a reconstruction source.
>   DatanodeDescriptor datanodeDescriptor =
>       cluster.getNameNode().getNamesystem()
>           .getBlockManager().getDatanodeManager()
>           .getDatanode(storageInfos[6].getDatanodeUuid());
>   for (int i = 0; i < 100; i++) {
>     datanodeDescriptor.incrementPendingReplicationWithoutTargets();
>   }
>   assertEquals(dataBlocks + parityBlocks, dnLocs.length);
>   int[] decommNodeIndex = {3, 4};
>   final List<DatanodeInfo> decommisionNodes = new ArrayList<DatanodeInfo>();
>   // add the nodes which will be decommissioned
>   decommisionNodes.add(dnLocs[decommNodeIndex[0]]);
>   decommisionNodes.add(dnLocs[decommNodeIndex[1]]);
>   decommissionNode(0, decommisionNodes, AdminStates.DECOMMISSIONED);
>   assertEquals(decommisionNodes.size(), fsn.getNumDecomLiveDataNodes());
>   //assertNull(checkFile(dfs, ecFile, 9, decommisionNodes, numDNs));
>   // Ensure the decommissioned datanodes are not automatically shut down
>   DFSClient client = getDfsClient(cluster.getNameNode(0), conf);
>   assertEquals("All datanodes must be alive", numDNs,
>       client.datanodeReport(DatanodeReportType.LIVE).length);
>   FileChecksum fileChecksum2 = dfs.getFileChecksum(ecFile, writeBytes);
>   Assert.assertTrue("Checksum mismatches!",
>       fileChecksum1.equals(fileChecksum2));
>   StripedFileTestUtil.checkData(dfs, ecFile, writeBytes, decommisionNodes,
>       null, blockGroupSize);
> }
> {code}
>