See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/565/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 344613 lines...]
    [junit] 2011-01-28 16:47:24,295 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:56404 to replicate blk_4801764326403794770_1017 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:24,295 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:56404 to replicate blk_6965342782379580550_1019 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,321 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910) Starting thread to transfer block blk_4801764326403794770_1017 to 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,321 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910) Starting thread to transfer block blk_6965342782379580550_1019 to 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,322 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910):Failed to transfer blk_4801764326403794770_1017 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit]     at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit]     at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit]     at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit]     at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,322 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:56404, storageID=DS-1165592067-127.0.1.1-56404-1296232639099, infoPort=37897, ipcPort=39910):Failed to transfer blk_6965342782379580550_1019 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit]     at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit]     at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit]     at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit]     at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,926 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_4164803938547019182_1016 to 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,926 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_4278625428280162445_1020 to 127.0.0.1:50485
    [junit] 2011-01-28 16:47:25,926 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_4164803938547019182_1016 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit]     at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit]     at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit]     at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit]     at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:25,927 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_4278625428280162445_1020 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit]     at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit]     at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit]     at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit]     at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-01-28 16:47:27,296 INFO  hdfs.StateChange (BlockManager.java:computeReplicationWorkForBlock(935)) - BLOCK* ask 127.0.0.1:36514 to replicate blk_9000474117190497559_1015 to datanode(s) 127.0.0.1:50485
    [junit] 2011-01-28 16:47:28,927 INFO  datanode.DataNode (DataNode.java:transferBlock(1213)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393) Starting thread to transfer block blk_9000474117190497559_1015 to 127.0.0.1:50485
    [junit] 2011-01-28 16:47:28,927 WARN  datanode.DataNode (DataNode.java:run(1402)) - DatanodeRegistration(127.0.0.1:36514, storageID=DS-893032692-127.0.1.1-36514-1296232639714, infoPort=57423, ipcPort=37393):Failed to transfer blk_9000474117190497559_1015 to 127.0.0.1:50485 got java.net.ConnectException: Connection refused
    [junit]     at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
    [junit]     at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:574)
    [junit]     at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:206)
    [junit]     at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:373)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:1370)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] /homes/hudson/tools/java/jdk1.6.0_11-32/jre/lib/rt.jar: error reading zip file
Build timed out. Aborting
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
5 tests failed.
FAILED:  TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml.<init>

Error Message:


Stack Trace:
Test report file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/TEST-org.apache.hadoop.hdfs.TestFileCreationClient.xml was length 0

FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1408)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:202)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
        at sun.nio.ch.IOUtil.initPipe(Native Method)
        at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
        at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
        at java.nio.channels.Selector.open(Selector.java:209)
        at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
        at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
        at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:408)
        at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:332)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:292)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:47)
        at org.apache.hadoop.ipc.RPC.getServer(RPC.java:382)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:421)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:512)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:282)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:264)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1575)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1518)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1485)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 2af0a037575b78fd3e690135ffb0d8c7 but expecting 23e55a458d1b3e2a8972fe15879ea798

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 2af0a037575b78fd3e690135ffb0d8c7 but expecting 23e55a458d1b3e2a8972fe15879ea798
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8h(TestStorageRestore.java:316)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)


