See https://hudson.apache.org/hudson/job/Hadoop-Hdfs-trunk/578/

###################################################################################
########################## LAST 60 LINES OF THE CONSOLE ###########################
[...truncated 662998 lines...]
    [junit]     at org.apache.hadoop.fi.DataTransferTestUtil$SleepAction.run(DataTransferTestUtil.java:1)
    [junit]     at org.apache.hadoop.fi.FiTestUtil$ActionContainer.run(FiTestUtil.java:116)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects.ajc$before$org_apache_hadoop_hdfs_server_datanode_BlockReceiverAspects$7$b9c2bffe(BlockReceiverAspects.aj:193)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:445)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:633)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.opWriteBlock(DataXceiver.java:390)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.opWriteBlock(DataTransferProtocol.java:389)
    [junit]     at org.apache.hadoop.hdfs.protocol.DataTransferProtocol$Receiver.processOp(DataTransferProtocol.java:331)
    [junit]     at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:130)
    [junit]     at java.lang.Thread.run(Thread.java:619)
    [junit] Caused by: java.lang.InterruptedException: sleep interrupted
    [junit]     at java.lang.Thread.sleep(Native Method)
    [junit]     at org.apache.hadoop.fi.FiTestUtil.sleep(FiTestUtil.java:80)
    [junit]     ... 11 more
    [junit] 2011-02-10 12:47:13,695 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-10 12:47:13,709 INFO  datanode.DataBlockScanner (DataBlockScanner.java:run(622)) - Exiting DataBlockScanner thread.
    [junit] 2011-02-10 12:47:13,795 INFO  datanode.DataNode (DataNode.java:run(1460)) - DatanodeRegistration(127.0.0.1:51219, storageID=DS-1434452765-127.0.1.1-51219-1297342022685, infoPort=48801, ipcPort=37337):Finishing DataNode in: FSDataset{dirpath='/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data1/current/finalized,/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build-fi/test/data/dfs/data/data2/current/finalized'}
    [junit] 2011-02-10 12:47:13,795 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37337
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.DataNode (DataNode.java:shutdown(786)) - Waiting for threadgroup to exit, active threads is 0
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(133)) - Shutting down all async disk service threads...
    [junit] 2011-02-10 12:47:13,796 INFO  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(142)) - All async disk service threads have been shut down.
    [junit] 2011-02-10 12:47:13,797 WARN  datanode.FSDatasetAsyncDiskService (FSDatasetAsyncDiskService.java:shutdown(130)) - AsyncDiskService has already shut down.
    [junit] 2011-02-10 12:47:13,898 WARN  namenode.FSNamesystem (FSNamesystem.java:run(2847)) - ReplicationMonitor thread received InterruptedException.java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-10 12:47:13,899 INFO  namenode.FSEditLog (FSEditLog.java:printStatistics(559)) - Number of transactions: 6 Total time for transactions(ms): 2Number of transactions batched in Syncs: 0 Number of syncs: 3 SyncTimes(ms): 4 4 
    [junit] 2011-02-10 12:47:13,898 WARN  namenode.DecommissionManager (DecommissionManager.java:run(70)) - Monitor interrupted: java.lang.InterruptedException: sleep interrupted
    [junit] 2011-02-10 12:47:13,900 INFO  ipc.Server (Server.java:stop(1610)) - Stopping server on 37835
    [junit] 2011-02-10 12:47:13,900 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 0 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 9 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(475)) - Stopping IPC Server listener on 37835
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(675)) - Stopping IPC Server Responder
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 8 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 3 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 5 on 37835: exiting
    [junit] Tests run: 3, Failures: 0, Errors: 0, Time elapsed: 36.823 sec
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 6 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 7 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 2 on 37835: exiting
    [junit] 2011-02-10 12:47:13,902 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 1 on 37835: exiting
    [junit] 2011-02-10 12:47:13,901 INFO  ipc.Server (Server.java:run(1443)) - IPC Server handler 4 on 37835: exiting

checkfailure:

-run-test-hdfs-fault-inject-withtestcaseonly:

run-test-hdfs-fault-inject:

BUILD FAILED
/grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build.xml:746: Tests failed!

Total time: 69 minutes 17 seconds
[FINDBUGS] Skipping publisher since build result is FAILURE
Publishing Javadoc
Archiving artifacts
Recording test results
Recording fingerprints
Publishing Clover coverage report...
No Clover report will be published due to a Build Failure
Email was triggered for: Failure
Sending email for trigger: Failure



###################################################################################
############################## FAILED TESTS (if any) ##############################
3 tests failed.
REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite

Error Message:
java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)

Stack Trace:
java.lang.RuntimeException: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1546)
        at org.apache.hadoop.conf.Configuration.loadResources(Configuration.java:1411)
        at org.apache.hadoop.conf.Configuration.getProps(Configuration.java:1357)
        at org.apache.hadoop.conf.Configuration.set(Configuration.java:600)
        at org.apache.hadoop.conf.Configuration.setBoolean(Configuration.java:804)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:313)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.runTestUnfinishedBlockCRCError(TestFileConcurrentReader.java:302)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.__CLR3_0_2u5mf5tro5(TestFileConcurrentReader.java:275)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorTransferToVerySmallWrite(TestFileConcurrentReader.java:274)
Caused by: java.io.FileNotFoundException: /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/classes/hdfs-default.xml (Too many open files)
        at java.io.FileInputStream.open(Native Method)
        at java.io.FileInputStream.<init>(FileInputStream.java:106)
        at java.io.FileInputStream.<init>(FileInputStream.java:66)
        at sun.net.www.protocol.file.FileURLConnection.connect(FileURLConnection.java:70)
        at sun.net.www.protocol.file.FileURLConnection.getInputStream(FileURLConnection.java:161)
        at com.sun.org.apache.xerces.internal.impl.XMLEntityManager.setupCurrentEntity(XMLEntityManager.java:653)
        at com.sun.org.apache.xerces.internal.impl.XMLVersionDetector.determineDocVersion(XMLVersionDetector.java:186)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:771)
        at com.sun.org.apache.xerces.internal.parsers.XML11Configuration.parse(XML11Configuration.java:737)
        at com.sun.org.apache.xerces.internal.parsers.XMLParser.parse(XMLParser.java:107)
        at com.sun.org.apache.xerces.internal.parsers.DOMParser.parse(DOMParser.java:225)
        at com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderImpl.parse(DocumentBuilderImpl.java:283)
        at javax.xml.parsers.DocumentBuilder.parse(DocumentBuilder.java:180)
        at org.apache.hadoop.conf.Configuration.loadResource(Configuration.java:1460)

REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransfer

Error Message:
Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
        at org.apache.hadoop.util.Shell.run(Shell.java:188)
        at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
        at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:571)
        at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:50)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:492)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
        at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
        at junit.framework.TestCase.runBare(TestCase.java:132)
        at junit.framework.TestResult$1.protect(TestResult.java:110)
        at junit.framework.TestResult.runProtected(TestResult.java:128)
        at junit.framework.TestResult.run(TestResult.java:113)
        at junit.framework.TestCase.run(TestCase.java:124)
        at junit.framework.TestSuite.runTest(TestSuite.java:232)
        at junit.framework.TestSuite.run(TestSuite.java:227)
        at org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)
        at junit.framework.JUnit4TestAdapter.run(JUnit4TestAdapter.java:39)
        at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.run(JUnitTestRunner.java:420)
        at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.launch(JUnitTestRunner.java:911)
        at org.apache.tools.ant.taskdefs.optional.junit.JUnitTestRunner.main(JUnitTestRunner.java:768)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
        at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
        at java.lang.ProcessImpl.start(ProcessImpl.java:65)
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)
        ... 34 more

Stack Trace:
java.lang.RuntimeException: Error while running command to get file permissions : java.io.IOException: Cannot run program "/bin/ls": java.io.IOException: error=24, Too many open files
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:459)
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:206)
        at org.apache.hadoop.util.Shell.run(Shell.java:188)
        at org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:381)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:467)
        at org.apache.hadoop.util.Shell.execCommand(Shell.java:450)
        at org.apache.hadoop.fs.RawLocalFileSystem.execCommand(RawLocalFileSystem.java:571)
        at org.apache.hadoop.fs.RawLocalFileSystem.access$100(RawLocalFileSystem.java:50)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:492)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
        at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
Caused by: java.io.IOException: java.io.IOException: error=24, Too many open files
        at java.lang.UNIXProcess.<init>(UNIXProcess.java:148)
        at java.lang.ProcessImpl.start(ProcessImpl.java:65)
        at java.lang.ProcessBuilder.start(ProcessBuilder.java:452)

        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.loadPermissionInfo(RawLocalFileSystem.java:517)
        at org.apache.hadoop.fs.RawLocalFileSystem$RawLocalFileStatus.getPermission(RawLocalFileSystem.java:467)
        at org.apache.hadoop.util.DiskChecker.mkdirsWithExistsAndPermissionCheck(DiskChecker.java:131)
        at org.apache.hadoop.util.DiskChecker.checkDir(DiskChecker.java:148)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.getDataDirsFromURIs(DataNode.java:1593)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.makeInstance(DataNode.java:1573)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1519)
        at org.apache.hadoop.hdfs.server.datanode.DataNode.instantiateDataNode(DataNode.java:1486)
        at org.apache.hadoop.hdfs.MiniDFSCluster.startDataNodes(MiniDFSCluster.java:678)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:483)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
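Here the exhausted descriptor table breaks fork/exec as well: spawning /bin/ls needs fresh descriptors for the child's pipes, so Shell.execCommand dies with error=24 before the DataNode can even check its data directories. The usual fix for this class of leak is to close every per-iteration stream in a finally block; a hedged sketch follows, where LeakFreeRead, readOnce, fs, and path are hypothetical stand-ins, not the actual TestFileConcurrentReader code:

    import java.io.IOException;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    // Sketch only: close the per-iteration stream in finally so a failed
    // assertion cannot leak a descriptor; repeated over many iterations,
    // one leaked stream per loop is enough to hit EMFILE.
    public class LeakFreeRead {
      static void readOnce(FileSystem fs, Path path) throws IOException {
        FSDataInputStream in = null;
        try {
          in = fs.open(path);
          // ... read and verify block contents here ...
        } finally {
          IOUtils.closeStream(in);  // null-safe, swallows close-time IOExceptions
        }
      }
    }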


REGRESSION:  org.apache.hadoop.hdfs.TestFileConcurrentReader.testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite

Error Message:
Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.

Stack Trace:
java.io.IOException: Cannot lock storage /grid/0/hudson/hudson-slave/workspace/Hadoop-Hdfs-trunk/trunk/build/test/data/dfs/name1. The directory is already locked.
        at org.apache.hadoop.hdfs.server.common.Storage$StorageDirectory.lock(Storage.java:615)
        at org.apache.hadoop.hdfs.server.namenode.FSImage.formatOccurred(FSImage.java:1165)
        at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:558)
        at org.apache.hadoop.hdfs.server.namenode.NNStorage.format(NNStorage.java:577)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:1417)
        at org.apache.hadoop.hdfs.server.namenode.NameNode.format(NameNode.java:211)
        at org.apache.hadoop.hdfs.MiniDFSCluster.initMiniDFSCluster(MiniDFSCluster.java:470)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:203)
        at org.apache.hadoop.hdfs.MiniDFSCluster.<init>(MiniDFSCluster.java:78)
        at org.apache.hadoop.hdfs.MiniDFSCluster$Builder.build(MiniDFSCluster.java:195)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.init(TestFileConcurrentReader.java:88)
        at org.apache.hadoop.hdfs.TestFileConcurrentReader.setUp(TestFileConcurrentReader.java:73)
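This third failure looks like fallout from the previous one: the NameNode takes an exclusive in_use.lock in each storage directory, and since the earlier setUp died before its MiniDFSCluster was shut down, the lock on name1 was still held when the next test tried to format. A hedged fixture sketch (ClusterFixture and its cluster field are assumptions, not the actual test class) that releases the locks unconditionally:

    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Sketch of a defensive fixture, assuming the JUnit 3 style the test
    // uses (junit.framework.TestCase): tear the cluster down even when the
    // test body failed, releasing the in_use.lock files under the
    // name/data directories so later tests can format cleanly.
    public abstract class ClusterFixture extends junit.framework.TestCase {
      protected MiniDFSCluster cluster;

      @Override
      protected void tearDown() throws Exception {
        if (cluster != null) {
          cluster.shutdown();  // stops NameNode/DataNodes and unlocks storage dirs
          cluster = null;
        }
        super.tearDown();
      }
    }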


