See https://builds.apache.org/job/Hadoop-Hdfs-trunk/2638/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 6423 lines...]
[INFO] 
[INFO] --- maven-antrun-plugin:1.7:run (create-testdirs) @ hadoop-hdfs-project ---
[INFO] Executing tasks

main:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/target/test-dir
[INFO] Executed tasks
[INFO] 
[INFO] --- maven-source-plugin:2.3:jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-source-plugin:2.3:test-jar-no-fork (hadoop-java-sources) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (dist-enforce) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-site-plugin:3.4:attach-descriptor (attach-descriptor) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-javadoc-plugin:2.8.1:jar (module-javadocs) @ hadoop-hdfs-project ---
[INFO] Skipping javadoc generation
[INFO] 
[INFO] --- maven-enforcer-plugin:1.3.1:enforce (depcheck) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- maven-checkstyle-plugin:2.15:checkstyle (default-cli) @ hadoop-hdfs-project ---
[INFO] 
[INFO] --- findbugs-maven-plugin:3.0.0:findbugs (default-cli) @ hadoop-hdfs-project ---
[INFO] ------------------------------------------------------------------------
[INFO] Reactor Summary:
[INFO] 
[INFO] Apache Hadoop HDFS Client ......................... SUCCESS [03:54 min]
[INFO] Apache Hadoop HDFS ................................ FAILURE [  03:11 h]
[INFO] Apache Hadoop HDFS Native Client .................. SKIPPED
[INFO] Apache Hadoop HttpFS .............................. SKIPPED
[INFO] Apache Hadoop HDFS BookKeeper Journal ............. SKIPPED
[INFO] Apache Hadoop HDFS-NFS ............................ SKIPPED
[INFO] Apache Hadoop HDFS Project ........................ SUCCESS [  0.057 s]
[INFO] ------------------------------------------------------------------------
[INFO] BUILD FAILURE
[INFO] ------------------------------------------------------------------------
[INFO] Total time: 03:15 h
[INFO] Finished at: 2015-12-17T21:55:17+00:00
[INFO] Final Memory: 55M/582M
[INFO] ------------------------------------------------------------------------
[ERROR] Failed to execute goal org.apache.maven.plugins:maven-surefire-plugin:2.17:test (default-test) on project hadoop-hdfs: ExecutionException: java.lang.RuntimeException: The forked VM terminated without properly saying goodbye. VM crash or System.exit called?
[ERROR] Command was /bin/sh -c cd /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs && /home/jenkins/tools/java/jdk1.7.0_55/jre/bin/java -Xmx2048m -XX:MaxPermSize=768m -XX:+HeapDumpOnOutOfMemoryError -jar /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefirebooter4075307003129168319.jar /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefire6043025646050051366tmp /home/jenkins/jenkins-slave/workspace/Hadoop-Hdfs-trunk/hadoop-hdfs-project/hadoop-hdfs/target/surefire/surefire_5167702086034950429430tmp
[ERROR] -> [Help 1]
[ERROR] 
[ERROR] To see the full stack trace of the errors, re-run Maven with the -e switch.
[ERROR] Re-run Maven using the -X switch to enable full debug logging.
[ERROR] 
[ERROR] For more information about the errors and possible solutions, please read the following articles:
[ERROR] [Help 1] http://cwiki.apache.org/confluence/display/MAVEN/MojoFailureException
[ERROR] 
[ERROR] After correcting the problems, you can resume the build with the command
[ERROR]   mvn <goals> -rf :hadoop-hdfs
Build step 'Execute shell' marked build as failure
Archiving artifacts
Recording test results
Sending e-mails to: hdfs-dev@hadoop.apache.org
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any
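
A brief note on the resume hint in the console output above: Maven prints the goals as a placeholder, and the job's actual goals sit in the truncated part of the log, so the concrete invocation below is an assumption, a minimal sketch of picking the build back up from the failing module:

    mvn clean install -rf :hadoop-hdfs

Appending -e (full stack traces) or -X (debug logging), as the [ERROR] lines suggest, should surface the Surefire fork failure in more detail.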



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
FAILED:  org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.testUpgradeFromRel2ReservedImage

Error Message:
Failed to load an FSImage file!

Stack Trace:
java.io.IOException: Failed to load an FSImage file!
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:683)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.doUpgrade(FSImage.java:390)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.recoverTransitionRead(FSImage.java:277)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFSImage(FSNamesystem.java:943)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.loadFromDisk(FSNamesystem.java:653)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.loadNamesystem(NameNode.java:621)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.initialize(NameNode.java:678)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:884)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.<init>(NameNode.java:863)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.createNameNode(NameNode.java:1565)
        at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNode(MiniDFSCluster.java:1247)
        at org.apache.hadoop.hdfs.MiniDFSCluster.configureNameService(MiniDFSCluster.java:1016)
        at org.apache.hadoop.hdfs.MiniDFSCluster.createNameNodesAndSetConf(MiniDFSCluster.java:891)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:823)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:482)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:441)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.testUpgradeFromRel2ReservedImage(TestDFSUpgradeFromImage.java:483)


FAILED:  org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.testUpgradeFromRel1BBWImage

Error Message:
Cannot obtain block length for LocatedBlock{BP-2100951145-67.195.81.146-1450379530244:blk_7162739548153522810_1020; getBlockSize()=1024; corrupt=false; offset=0; locs=[DatanodeInfoWithStorage[127.0.0.1:34413,DS-7d311b1e-032e-4240-af8b-4fab21189c5a,DISK]]}

Stack Trace:
java.io.IOException: Cannot obtain block length for LocatedBlock{BP-2100951145-67.195.81.146-1450379530244:blk_7162739548153522810_1020; getBlockSize()=1024; corrupt=false; offset=0; locs=[DatanodeInfoWithStorage[127.0.0.1:34413,DS-7d311b1e-032e-4240-af8b-4fab21189c5a,DISK]]}
        at org.apache.hadoop.hdfs.DFSInputStream.readBlockLength(DFSInputStream.java:399)
        at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:343)
        at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:275)
        at org.apache.hadoop.hdfs.DFSInputStream.<init>(DFSInputStream.java:265)
        at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1046)
        at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1011)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.dfsOpenFileWithRetries(TestDFSUpgradeFromImage.java:177)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.verifyDir(TestDFSUpgradeFromImage.java:213)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.verifyFileSystem(TestDFSUpgradeFromImage.java:228)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.upgradeAndVerify(TestDFSUpgradeFromImage.java:600)
        at org.apache.hadoop.hdfs.TestDFSUpgradeFromImage.testUpgradeFromRel1BBWImage(TestDFSUpgradeFromImage.java:622)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestNNThroughputBenchmark.testNNThroughput

Error Message:
Not replicated yet: /nnThroughputBenchmark/blockReport/ThroughputBenchDir0/ThroughputBench24

Stack Trace:
org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException: Not replicated yet: /nnThroughputBenchmark/blockReport/ThroughputBenchDir0/ThroughputBench24
        at org.apache.hadoop.hdfs.server.namenode.FSDirWriteFileOp.validateAddBlock(FSDirWriteFileOp.java:190)
        at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.getAdditionalBlock(FSNamesystem.java:2379)
        at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.addBlock(NameNodeRpcServer.java:797)
        at org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark$BlockReportStats.addBlocks(NNThroughputBenchmark.java:1184)
        at org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark$BlockReportStats.generateInputs(NNThroughputBenchmark.java:1171)
        at org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark$OperationStatsBase.benchmark(NNThroughputBenchmark.java:281)
        at org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark.run(NNThroughputBenchmark.java:1519)
        at org.apache.hadoop.hdfs.server.namenode.NNThroughputBenchmark.runBenchmark(NNThroughputBenchmark.java:1422)
        at org.apache.hadoop.hdfs.server.namenode.TestNNThroughputBenchmark.testNNThroughput(TestNNThroughputBenchmark.java:53)
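
Each of the three failures above should be reproducible in isolation from the hadoop-hdfs module via Surefire's test filter. This is a sketch assuming a standard trunk checkout; the single-method "#" selector is standard usage for the Surefire 2.17 plugin this build ran with:

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestDFSUpgradeFromImage#testUpgradeFromRel2ReservedImage
    mvn test -Dtest=TestDFSUpgradeFromImage#testUpgradeFromRel1BBWImage
    mvn test -Dtest=TestNNThroughputBenchmark#testNNThroughput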

