[
https://issues.apache.org/jira/browse/SPARK-4515?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
]
Nishkam Ravi updated SPARK-4515:
--------------------------------
Description:
OOM/GC errors occur with sort-based shuffle; they go away when spark.shuffle.manager
is set to hash. We are also observing memory leak issues. This can be reproduced by
running PageRank with a large enough input dataset. Sample stack trace:
19:22:14 WARN TaskSetManager: Lost task 56.0 in stage 1.0 (TID 298,
c1706.halxg.cloudera.com): java.lang.OutOfMemoryError: Java heap space
com.ning.compress.BufferRecycler.allocDecodeBuffer(BufferRecycler.java:137)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:111)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:97)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:80)
org.apache.spark.io.LZFCompressionCodec.compressedInputStream(CompressionCodec.scala:107)
org.apache.spark.storage.BlockManager.wrapForCompression(BlockManager.scala:1155)
org.apache.spark.util.collection.ExternalSorter$SpillReader.nextBatchStream(ExternalSorter.scala:559)
org.apache.spark.util.collection.ExternalSorter$SpillReader.<init>(ExternalSorter.scala:531)
org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
scala.collection.AbstractTraversable.map(Traversable.scala:105)
org.apache.spark.util.collection.ExternalSorter.merge(ExternalSorter.scala:379)
org.apache.spark.util.collection.ExternalSorter.partitionedIterator(ExternalSorter.scala:686)
org.apache.spark.util.collection.ExternalSorter.writePartitionedFile(ExternalSorter.scala:739)
org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:70)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
org.apache.spark.scheduler.Task.run(Task.scala:56)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:195)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
java.lang.Thread.run(Thread.java:745)
was:
OOM/GC errors occur with sort-based shuffle; they go away when spark.shuffle.manager
is set to hash. We are also observing memory leak issues. This can be reproduced by
running PageRank with a large enough input dataset. Sample stack trace:
19:22:14 WARN TaskSetManager: Lost task 56.0 in stage 1.0 (TID 298,
c1706.halxg.cloudera.com): java.lang.OutOfMemoryError: Java heap space
com.ning.compress.BufferRecycler.allocDecodeBuffer(BufferRecycler.java:137)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:111)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:97)
com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:80)
org.apache.spark.io.LZFCompressionCodec.compressedInputStream(CompressionCodec.scala:107)
org.apache.spark.storage.BlockManager.wrapForCompression(BlockManager.scala:1155)
org.apache.spark.util.collection.ExternalSorter$SpillReader.nextBatchStream(ExternalSorter.scala:559)
org.apache.spark.util.collection.ExternalSorter$SpillReader.<init>(ExternalSorter.scala:531)
org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
scala.collection.AbstractTraversable.map(Traversable.scala:105)
org.apache.spark.util.collection.ExternalSorter.merge(ExternalSorter.scala:379)
org.apache.spark.util.collection.ExternalSorter.partitionedIterator(ExternalSorter.scala:686)
org.apache.spark.util.collection.ExternalSorter.writePartitionedFile(ExternalSorter.scala:739)
org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:70)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
org.apache.spark.scheduler.Task.run(Task.scala:56)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:195)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
java.lang.Thread.run(Thread.java:745)
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to
stage failure: Total size of serialized results of 224 tasks (1024.5 MB) is
bigger than maxResultSize (1024.0 MB)
at
org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1206)
at
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1195)
at
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1194)
at
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at
org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1194)
at
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
at
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:697)
at scala.Option.foreach(Option.scala:236)
at
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:697)
at
org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1412)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at
akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at
scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at
scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at
scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
> OOM/GC errors with sort-based shuffle
> -------------------------------------
>
> Key: SPARK-4515
> URL: https://issues.apache.org/jira/browse/SPARK-4515
> Project: Spark
> Issue Type: Bug
> Components: Shuffle
> Affects Versions: 1.2.0
> Reporter: Nishkam Ravi
>
> OOM/GC errors occur with sort-based shuffle; they go away when
> spark.shuffle.manager is set to hash. We are also observing memory leak issues.
> This can be reproduced by running PageRank with a large enough input dataset.
> Sample stack trace:
> 19:22:14 WARN TaskSetManager: Lost task 56.0 in stage 1.0 (TID 298,
> c1706.halxg.cloudera.com): java.lang.OutOfMemoryError: Java heap space
>
> com.ning.compress.BufferRecycler.allocDecodeBuffer(BufferRecycler.java:137)
> com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:111)
> com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:97)
> com.ning.compress.lzf.LZFInputStream.<init>(LZFInputStream.java:80)
>
> org.apache.spark.io.LZFCompressionCodec.compressedInputStream(CompressionCodec.scala:107)
>
> org.apache.spark.storage.BlockManager.wrapForCompression(BlockManager.scala:1155)
>
> org.apache.spark.util.collection.ExternalSorter$SpillReader.nextBatchStream(ExternalSorter.scala:559)
>
> org.apache.spark.util.collection.ExternalSorter$SpillReader.<init>(ExternalSorter.scala:531)
>
> org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
>
> org.apache.spark.util.collection.ExternalSorter$$anonfun$6.apply(ExternalSorter.scala:379)
>
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
>
> scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
> scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
> scala.collection.AbstractTraversable.map(Traversable.scala:105)
>
> org.apache.spark.util.collection.ExternalSorter.merge(ExternalSorter.scala:379)
>
> org.apache.spark.util.collection.ExternalSorter.partitionedIterator(ExternalSorter.scala:686)
>
> org.apache.spark.util.collection.ExternalSorter.writePartitionedFile(ExternalSorter.scala:739)
>
> org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:70)
>
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:68)
>
> org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
> org.apache.spark.scheduler.Task.run(Task.scala:56)
> org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:195)
>
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
> java.lang.Thread.run(Thread.java:745)
--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]