I have taken multiple jstack dumps, and here is the thread state at the point when memory usage grows significantly:
"Thread-13" prio=6 tid=0x0000000009a8f000 nid=0x102c runnable
[0x000000000ba3f000]
java.lang.Thread.State: RUNNABLE
at java.io.FileOutputStream.writeBytes(Native Method)
at java.io.FileOutputStream.write(FileOutputStream.java:260)
at
org.apache.hadoop.fs.RawLocalFileSystem$LocalFSFileOutputStream.write(RawLocalFileSystem.java:190)
at
java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:65)
at java.io.BufferedOutputStream.write(BufferedOutputStream.java:109)
- locked <0x0000000550c51320> (a java.io.BufferedOutputStream)
at
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:49)
at java.io.DataOutputStream.write(DataOutputStream.java:90)
- locked <0x0000000550c512e0> (a
org.apache.hadoop.fs.FSDataOutputStream)
at
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer.writeChunk(ChecksumFileSystem.java:354)
at
org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunk(FSOutputSummer.java:150)
at
org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:132)
- locked <0x0000000550c51080> (a
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer)
at
org.apache.hadoop.fs.FSOutputSummer.flushBuffer(FSOutputSummer.java:121)
- locked <0x0000000550c51080> (a
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer)
at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:112)
at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:86)
- locked <0x0000000550c51080> (a
org.apache.hadoop.fs.ChecksumFileSystem$ChecksumFSOutputSummer)
at
org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:49)
at java.io.DataOutputStream.write(DataOutputStream.java:90)
- locked <0x0000000550c51040> (a
org.apache.hadoop.fs.FSDataOutputStream)
at
org.apache.hadoop.io.SequenceFile$Writer.append(SequenceFile.java:1013)
- locked <0x0000000550c50ff0> (a
org.apache.hadoop.io.SequenceFile$Writer)
at
org.apache.hadoop.mapred.SequenceFileOutputFormat$1.write(SequenceFileOutputFormat.java:75)
at
org.apache.hadoop.mapred.lib.MultipleOutputFormat$1.write(MultipleOutputFormat.java:102)
at org.apache.hadoop.mapred.ReduceTask$3.collect(ReduceTask.java:440)
at org.apache.nutch.crawl.Generator$Selector.reduce(Generator.java:290)
at org.apache.nutch.crawl.Generator$Selector.reduce(Generator.java:109)
at
org.apache.hadoop.mapred.ReduceTask.runOldReducer(ReduceTask.java:463)
at org.apache.hadoop.mapred.ReduceTask.run(ReduceTask.java:411)
at
org.apache.hadoop.mapred.LocalJobRunner$Job.run(LocalJobRunner.java:216)
--
View this message in context:
http://lucene.472066.n3.nabble.com/Nutch-1-2-performance-and-memory-issues-tp2407256p2417398.html
Sent from the Nutch - User mailing list archive at Nabble.com.