See https://builds.apache.org/job/Hadoop-Hdfs-trunk/2607/
###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 7560 lines...]
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks
main:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO]
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-site-plugin:3.4:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO]
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO]
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO]
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [07:47 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [ 05:55 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [ 0.084 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 06:03 h
[INFO] Finished at: 2015-12-06T23:56:16+00:00
[INFO] Final Memory: 59M/767M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: There are test failures.
[ERROR]
[ERROR] Please refer to /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports for the individual test results.
[ERROR] -> [Help 1]
[ERROR]
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR]
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR]
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR] mvn <goals> -rf :hadoop-hdfs
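For reference, one plausible way to reproduce a single failing test locally (a sketch only: it assumes a checked-out trunk workspace with the standard Maven Surefire setup, and the test class chosen is just an example from the list below):

    # From the repository root: run one test class in the failed module.
    # -Dtest is Surefire's test-selection property; -rf resumes the
    # reactor build from the hadoop-hdfs module, skipping earlier modules.
    mvn test -Dtest=TestDirectoryScanner -rf :hadoop-hdfs

The per-test output then lands in hadoop-hdfs-project/hadoop-hdfs/target/surefire-reports, the directory referenced in the error message above.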
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: [email protected]
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.

FAILED: org.apache.hadoop.hdfs.TestEncryptionZones.testStartFileRetry

Error Message:
test timed out after 120000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 120000 milliseconds
    at sun.misc.Unsafe.park(Native Method)
    at java.util.concurrent.locks.LockSupport.park(LockSupport.java:186)
    at java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:834)
    at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedInterruptibly(AbstractQueuedSynchronizer.java:994)
    at java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1303)
    at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:236)
    at org.apache.hadoop.hdfs.TestEncryptionZones.testStartFileRetry(TestEncryptionZones.java:1093)

FAILED: org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.testSecondaryNameNodeHttpAddressNotNeeded

Error Message:
Error replaying edit log at offset 0. Expected transaction ID was 1

Stack Trace:
org.apache.hadoop.hdfs.server.namenode.EditLogInputException: Error replaying edit log at offset 0. Expected transaction ID was 1
    at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:194)
    at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85)
    at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.skipUntil(EditLogInputStream.java:151)
    at org.apache.hadoop.hdfs.server.namenode.RedundantEditLogInputStream.nextOp(RedundantEditLogInputStream.java:178)
    at org.apache.hadoop.hdfs.server.namenode.EditLogInputStream.readOp(EditLogInputStream.java:85)
    at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadEditRecords(FSEditLogLoader.java:194)
    at org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.loadFSEdits(FSEditLogLoader.java:147)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.loadEdits(FSImage.java:831)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:688)
    at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:290)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:960)
    at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:666)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:621)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:678)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:884)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:863)
    at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1565)
    at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:2017)
    at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:1982)
    at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.restartNameNode(TestSecureNNWithQJM.java:203)
    at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.doNNWithQJMTest(TestSecureNNWithQJM.java:185)
    at org.apache.hadoop.hdfs.qjournal.TestSecureNNWithQJM.testSecondaryNameNodeHttpAddressNotNeeded(TestSecureNNWithQJM.java:171)

FAILED: org.apache.hadoop.hdfs.server.namenode.snapshot.TestOpenFilesWithSnapshot.testOpenFilesWithRename

Error Message:
Timed out waiting for Mini HDFS Cluster to start

Stack Trace:
java.io.IOException: Timed out waiting for Mini HDFS Cluster to start
    at org.apache.hadoop.hdfs.MiniDFSCluster.waitClusterUp(MiniDFSCluster.java:1345)
    at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:2021)
    at org.apache.hadoop.hdfs.MiniDFSCluster.restartNameNode(MiniDFSCluster.java:1991)
    at org.apache.hadoop.hdfs.server.namenode.snapshot.TestOpenFilesWithSnapshot.testOpenFilesWithRename(TestOpenFilesWithSnapshot.java:208)

FAILED: org.apache.hadoop.tracing.TestTracing.testTracing

Error Message:
expected:<-767290065032346203> but was:<-1752631840377953925>

Stack Trace:
java.lang.AssertionError: expected:<-767290065032346203> but was:<-1752631840377953925>
    at org.junit.Assert.fail(Assert.java:88)
    at org.junit.Assert.failNotEquals(Assert.java:743)
    at org.junit.Assert.assertEquals(Assert.java:118)
    at org.junit.Assert.assertEquals(Assert.java:555)
    at org.junit.Assert.assertEquals(Assert.java:542)
    at org.apache.hadoop.tracing.TestTracing.readWithTracing(TestTracing.java:177)
    at org.apache.hadoop.tracing.TestTracing.testTracing(TestTracing.java:80)

FAILED: org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.testThrottling

Error Message:
test timed out after 300000 milliseconds

Stack Trace:
java.lang.Exception: test timed out after 300000 milliseconds
    at java.lang.Object.wait(Native Method)
    at java.lang.Object.wait(Object.java:503)
    at org.apache.hadoop.hdfs.DataStreamer.waitAndQueuePacket(DataStreamer.java:804)
    at org.apache.hadoop.hdfs.DFSOutputStream.enqueueCurrentPacket(DFSOutputStream.java:423)
    at org.apache.hadoop.hdfs.DFSOutputStream.enqueueCurrentPacketFull(DFSOutputStream.java:432)
    at org.apache.hadoop.hdfs.DFSOutputStream.writeChunk(DFSOutputStream.java:418)
    at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:217)
    at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:125)
    at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:111)
    at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:57)
    at java.io.DataOutputStream.write(DataOutputStream.java:107)
    at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:418)
    at org.apache.hadoop.hdfs.DFSTestUtil.createFile(DFSTestUtil.java:376)
    at org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.createFile(TestDirectoryScanner.java:104)
    at org.apache.hadoop.hdfs.server.datanode.TestDirectoryScanner.testThrottling(TestDirectoryScanner.java:580)