See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/569/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 672926 lines...]
    [junit] 2011-02-01 12:35:29,209 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-01 12:35:29,209 INFO  hdfs.MiniDFSCluster (MiniDFSCluster.java:shutdownDataNodes(835)) - Shutting down DataNode 0
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 48677
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 48677: exiting
    [junit] 2011-02-01 12:35:29,312 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 48677
    [junit] 2011-02-01 12:35:29,313 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-01 12:35:29,312 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 1
    [junit] 2011-02-01 12:35:29,313 WARN  datanode.DataNode (DataXceiverServer.java:run(141)) - DatanodeRegistration(127.0.0.1:59600, storageID=DS-1043273588-127.0.1.1-59600-1296563718288, infoPort=40832, ipcPort=48677):DataXceiveServer: java.nio.channels.AsynchronousCloseException
    [junit]     at java.nio.channels.spi.AbstractInterruptibleChannel.end(AbstractInterruptibleChannel.java:185)
    [junit]     at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:152)
    [junit]     at sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:84)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:134)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] 
    [junit] 2011-02-01 12:35:29,315 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-01 12:35:29,331 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:59600, storageID=DS-1043273588-127.0.1.1-59600-1296563718288, infoPort=40832, ipcPort=48677):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-01 12:35:29,416 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 48677
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-01 12:35:29,416 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-01 12:35:29,417 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-01 12:35:29,529 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-01 12:35:29,528 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-01 12:35:29,529 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(595)) - Number of transactions: 6 Total time for transactions(ms): 1Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 8 4 
    [junit] 2011-02-01 12:35:29,530 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 42159
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 42159: exiting
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 42159: exiting
    [junit] 2011-02-01 12:35:29,531 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 42159
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.765 sec
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 42159: exiting
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-01 12:35:29,535 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 42159: exiting
    [junit] 2011-02-01 12:35:29,536 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 42159: exiting
    [junit] 2011-02-01 12:35:29,536 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 42159: exiting
    [junit] 2011-02-01 12:35:29,538 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 42159: exiting
    [junit] 2011-02-01 12:35:29,538 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 42159: exiting
    [junit] 2011-02-01 12:35:29,539 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 42159: exiting
    [junit] 2011-02-01 12:35:29,539 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 42159: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 61 minutes 7 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
6 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
        at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:333)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:461)
        at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:442)
        at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken$1.run(TestClientProtocolWithDelegationToken.java:110)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:396)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1142)
        at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.__CLR3_0_23sqt3nola(TestClientProtocolWithDelegationToken.java:105)
        at org.apache.hadoop.hdfs.security.TestClientProtocolWithDelegationToken.testDelegationTokenRpc(TestClientProtocolWithDelegationToken.java:77)
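
For reference, a minimal sketch of the pattern this trace shows failing, assuming the 2011-era Hadoop RPC API: the test builds a ClientProtocol proxy inside UserGroupInformation.doAs, and the NPE fires inside WritableRpcEngine.getProxy before the proxy is returned. This is not the test source; names such as addr and conf are illustrative.

    import java.net.InetSocketAddress;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DelegationTokenRpcSketch {
      // Build the proxy as the token-bearing user, mirroring the frames
      // TestClientProtocolWithDelegationToken$1.run -> RPC.getProxy above.
      static ClientProtocol getProxyAs(UserGroupInformation ugi,
          final InetSocketAddress addr, final Configuration conf) throws Exception {
        return ugi.doAs(new PrivilegedExceptionAction<ClientProtocol>() {
          public ClientProtocol run() throws Exception {
            return (ClientProtocol) RPC.getProxy(ClientProtocol.class,
                ClientProtocol.versionID, addr, conf,
                NetUtils.getDefaultSocketFactory(conf));
          }
        });
      }
    }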


FAILED:  org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc

Error Message:
null

Stack Trace:
java.lang.NullPointerException
        at org.apache.hadoop.ipc.WritableRpcEngine.getProxy(WritableRpcEngine.java:241)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:422)
        at org.apache.hadoop.ipc.RPC.getProtocolProxy(RPC.java:368)
        at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:345)
        at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.__CLR3_0_2i25d82ois(TestBlockToken.java:212)
        at org.apache.hadoop.hdfs.security.token.block.TestBlockToken.testBlockTokenRpc(TestBlockToken.java:185)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
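
The exception comes from Storage$StorageDirectory.lock refusing to re-acquire name1's lock file, which typically means an earlier test's MiniDFSCluster was never shut down. A hedged sketch of the usual guard, using the same Builder entry point the trace shows; the test body comment marks where real assertions would go.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class ClusterTeardownSketch {
      public static void main(String[] args) throws Exception {
        // Same entry point as the trace: MiniDFSCluster$Builder.build().
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(new Configuration()).build();
        try {
          // test body would run against the cluster here
        } finally {
          cluster.shutdown();  // releases the storage directory locks
        }
      }
    }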


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1342)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.format(FSImage.java:1360)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)


FAILED:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
Too many open files

Stack Trace:
java.io.IOException: Too many open files
        at sun.nio.ch.IOUtil.initPipe(Native Method)
        at sun.nio.ch.EPollSelectorImpl.<init>(EPollSelectorImpl.java:49)
        at sun.nio.ch.EPollSelectorProvider.openSelector(EPollSelectorProvider.java:18)
        at java.nio.channels.Selector.open(Selector.java:209)
        at org.apache.hadoop.ipc.Server$Responder.<init>(Server.java:602)
        at org.apache.hadoop.ipc.Server.<init>(Server.java:1510)
        at org.apache.hadoop.ipc.RPC$Server.<init>(RPC.java:576)
        at org.apache.hadoop.ipc.WritableRpcEngine$Server.<init>(WritableRpcEngine.java:338)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:298)
        at org.apache.hadoop.ipc.WritableRpcEngine.getServer(WritableRpcEngine.java:46)
        at org.apache.hadoop.ipc.RPC.getServer(RPC.java:550)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.initIpcServer(DataNode.java:422)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.startDataNode(DataNode.java:513)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:283)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.<init>(DataNode.java:265)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1576)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:315)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5trj9(TestFileConcurrentReader.java:275)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
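
Per the top frames (sun.nio.ch.IOUtil.initPipe via EPollSelectorImpl), each IPC Server opens a java.nio Selector, and every epoll Selector costs the process several file descriptors; clusters created without a matching shutdown leak them until the ulimit is reached. A standalone, illustrative sketch that reproduces the same IOException by leaking selectors:

    import java.io.IOException;
    import java.nio.channels.Selector;
    import java.util.ArrayList;
    import java.util.List;

    public class FdExhaustionSketch {
      public static void main(String[] args) {
        List<Selector> leaked = new ArrayList<Selector>();
        try {
          while (true) {
            // Never closed: each open() consumes descriptors (epoll fd + wakeup pipe).
            leaked.add(Selector.open());
          }
        } catch (IOException e) {
          // On Linux this surfaces as "Too many open files", as in the trace above.
          System.err.println("Failed after " + leaked.size() + " selectors: " + e);
        }
      }
    }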


FAILED:  org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore

Error Message:
Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4b0319f5f17997e862e5ec978d475775 but expecting a365969efc0d144bbd316c6e12a46ae7

Stack Trace:
java.io.IOException: Image file /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/secondary/current/fsimage is corrupt with MD5 checksum of 4b0319f5f17997e862e5ec978d475775 but expecting a365969efc0d144bbd316c6e12a46ae7
        at org.apache.hadoop.hdfs.server.namenode.FSImage.loadFSImage(FSImage.java:1063)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.doMerge(SecondaryNameNode.java:702)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode$CheckpointStorage.access$500(SecondaryNameNode.java:600)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doMerge(SecondaryNameNode.java:477)
        at org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.doCheckpoint(SecondaryNameNode.java:438)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.__CLR3_0_2dn2tm4u8u(TestStorageRestore.java:316)
        at org.apache.hadoop.hdfs.server.namenode.TestStorageRestore.testStorageRestore(TestStorageRestore.java:286)
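
The checkpoint fails because the fsimage the SecondaryNameNode merged does not hash to the MD5 the NameNode advertised. For reference, a hedged standalone sketch that recomputes such a digest over a local file with plain java.security (the path argument is illustrative):

    import java.io.FileInputStream;
    import java.io.InputStream;
    import java.math.BigInteger;
    import java.security.MessageDigest;

    public class FsImageMd5Sketch {
      public static void main(String[] args) throws Exception {
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        InputStream in = new FileInputStream(args[0]);  // e.g. .../current/fsimage
        try {
          byte[] buf = new byte[8192];
          for (int n; (n = in.read(buf)) != -1; ) {
            md5.update(buf, 0, n);
          }
        } finally {
          in.close();
        }
        // 32 zero-padded hex chars, the same format as the digests logged above.
        System.out.println(String.format("%032x", new BigInteger(1, md5.digest())));
      }
    }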


