[ https://issues.apache.org/jira/browse/SPARK-2898?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Davies Liu updated SPARK-2898:
------------------------------
Description:
--------------------------------------------------------------------
Java options: -Dspark.storage.memoryFraction=0.66
-Dspark.serializer=org.apache.spark.serializer.JavaSerializer
-Dspark.executor.memory=3g -Dspark.locality.wait=60000000
Options: SchedulerThroughputTest --num-tasks=10000 --num-trials=4
--inter-trial-wait=1
--------------------------------------------------------------------
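For reference, the same settings expressed through SparkConf (equivalent to the -D system properties above). Note that spark.locality.wait is interpreted in milliseconds, so 60000000 (~16.7 hours) effectively disables locality fallback for the duration of this test:

    import org.apache.spark.SparkConf

    // SparkConf form of the -D system properties used in this run.
    // spark.locality.wait is in milliseconds; 60000000 ms (~16.7 hours)
    // effectively pins every task to its preferred locality level.
    val conf = new SparkConf()
      .set("spark.storage.memoryFraction", "0.66")
      .set("spark.serializer", "org.apache.spark.serializer.JavaSerializer")
      .set("spark.executor.memory", "3g")
      .set("spark.locality.wait", "60000000")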
14/08/06 22:09:41 WARN JettyUtils: Failed to create UI on port 4040. Trying again on port 4041. - Failure(java.net.BindException: Address already in use)
worker 50114 crashed abruptly with exit status 1
14/08/06 22:10:37 ERROR Executor: Exception in task 1476.0 in stage 1.0 (TID 11476)
org.apache.spark.SparkException: Python worker exited unexpectedly (crashed)
    at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:150)
    at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:154)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:87)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    at org.apache.spark.scheduler.Task.run(Task.scala:54)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.io.EOFException
    at java.io.DataInputStream.readInt(DataInputStream.java:392)
    at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:101)
    ... 10 more
14/08/06 22:10:37 WARN PythonWorkerFactory: Failed to open socket to Python daemon:
java.net.ConnectException: Connection refused
    at java.net.PlainSocketImpl.socketConnect(Native Method)
    at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
    at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200)
    at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
    at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
    at java.net.Socket.connect(Socket.java:579)
    at java.net.Socket.connect(Socket.java:528)
    at java.net.Socket.<init>(Socket.java:425)
    at java.net.Socket.<init>(Socket.java:241)
    at org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:68)
    at org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
    at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
    at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
    at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    at org.apache.spark.scheduler.Task.run(Task.scala:54)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
14/08/06 22:10:37 ERROR Executor: Exception in task 1478.0 in stage 1.0 (TID 11478)
java.io.EOFException
    at java.io.DataInputStream.readInt(DataInputStream.java:392)
    at org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:69)
    at org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
    at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
    at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
    at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    at org.apache.spark.scheduler.Task.run(Task.scala:54)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
14/08/06 22:10:37 WARN PythonWorkerFactory: Assuming that daemon unexpectedly quit, attempting to restart
14/08/06 22:10:37 WARN TaskSetManager: Lost task 1476.0 in stage 1.0 (TID 11476, localhost): org.apache.spark.SparkException: Python worker exited unexpectedly (crashed)
    org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:150)
    org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:154)
    org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:87)
    org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    org.apache.spark.scheduler.Task.run(Task.scala:54)
    org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    java.lang.Thread.run(Thread.java:745)
14/08/06 22:10:37 ERROR TaskSetManager: Task 1476 in stage 1.0 failed 1 times; aborting job
14/08/06 22:10:37 WARN TaskSetManager: Lost task 1478.0 in stage 1.0 (TID 11478, localhost): java.io.EOFException:
    java.io.DataInputStream.readInt(DataInputStream.java:392)
    org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:69)
    org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
    org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
    org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
    org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
    org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
    org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    org.apache.spark.scheduler.Task.run(Task.scala:54)
    org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    java.lang.Thread.run(Thread.java:745)
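How to read these traces: the PythonWorkerFactory frames suggest the executor opens a plain socket to the Python daemon (PythonWorkerFactory.scala:68) and then reads a single int back (DataInputStream.readInt via line 69). A minimal sketch of that handshake, with hypothetical names and details rather than the actual implementation:

    import java.io.DataInputStream
    import java.net.Socket

    // Sketch of the daemon handshake implied by the traces above
    // (hypothetical; not the real PythonWorkerFactory code).
    object DaemonHandshakeSketch {
      def connect(daemonHost: String, daemonPort: Int): Socket = {
        // "Connection refused" in Socket.<init> => nothing is listening on
        // the daemon port, i.e. the daemon crashed or never started.
        val socket = new Socket(daemonHost, daemonPort)
        // EOFException in readInt => the daemon accepted the connection but
        // closed it before writing the worker handle back.
        val pid = new DataInputStream(socket.getInputStream).readInt()
        // A negative value (assumed) means the daemon could not fork a
        // worker, matching "Python daemon failed to launch worker" below.
        if (pid < 0) {
          socket.close()
          throw new IllegalStateException("Python daemon failed to launch worker")
        }
        socket
      }
    }

Under that reading, the errors in this report are the three ways the handshake can fail: a refused connection, an EOF mid-handshake, and an explicit launch failure.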
Another one:
Daemon failed to fork PySpark worker: [Errno 35] Resource temporarily unavailable
14/08/07 12:04:37 ERROR Executor: Exception in task 15579.0 in stage 0.0 (TID 15579)
java.lang.IllegalStateException: Python daemon failed to launch worker
    at org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:71)
    at org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
    at org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
    at org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
    at org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
    at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    at org.apache.spark.scheduler.Task.run(Task.scala:54)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
14/08/07 12:04:37 WARN TaskSetManager: Lost task 15579.0 in stage 0.0 (TID 15579, localhost): java.lang.IllegalStateException: Python daemon failed to launch worker
    org.apache.spark.api.python.PythonWorkerFactory.createSocket$1(PythonWorkerFactory.scala:71)
    org.apache.spark.api.python.PythonWorkerFactory.liftedTree1$1(PythonWorkerFactory.scala:83)
    org.apache.spark.api.python.PythonWorkerFactory.createThroughDaemon(PythonWorkerFactory.scala:82)
    org.apache.spark.api.python.PythonWorkerFactory.create(PythonWorkerFactory.scala:55)
    org.apache.spark.SparkEnv.createPythonWorker(SparkEnv.scala:101)
    org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:66)
    org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
    org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
    org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:62)
    org.apache.spark.scheduler.Task.run(Task.scala:54)
    org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:199)
    java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    java.lang.Thread.run(Thread.java:745)
14/08/07 12:04:37 ERROR TaskSetManager: Task 15579 in stage 0.0 failed 1 times; aborting job
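[Errno 35] is EAGAIN on BSD-derived systems such as OS X ("Resource temporarily unavailable"), i.e. fork(2) itself failing, most likely because thousands of short-lived workers exhausted the per-user process limit. The "Assuming that daemon unexpectedly quit, attempting to restart" warning above suggests the factory responds to a failed handshake by tearing the daemon down and retrying; a self-contained sketch of that recovery path (names and structure hypothetical):

    import java.net.Socket

    // Hypothetical sketch of the restart-and-retry behavior suggested by
    // "Assuming that daemon unexpectedly quit, attempting to restart".
    object DaemonRestartSketch {
      @volatile private var daemon: Option[Process] = None

      private def startDaemonIfNeeded(): Unit = synchronized {
        if (daemon.isEmpty) {
          // Spark launches the daemon as "python -m pyspark.daemon"; it is
          // treated as an opaque child process in this sketch.
          daemon = Some(new ProcessBuilder("python", "-m", "pyspark.daemon").start())
        }
      }

      private def stopDaemon(): Unit = synchronized {
        daemon.foreach(_.destroy())
        daemon = None
      }

      def createWorker(handshake: () => Socket): Socket = synchronized {
        startDaemonIfNeeded()
        try handshake()
        catch {
          case _: Exception =>
            // The daemon may have died, or its fork() may be failing with
            // EAGAIN (errno 35): restart the daemon once and retry.
            stopDaemon()
            startDaemonIfNeeded()
            handshake()
        }
      }
    }

Note that a restart does not help when the EAGAIN comes from the process limit itself, which would explain why this run still aborts.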
> Failed to connect to daemon
> ---------------------------
>
> Key: SPARK-2898
> URL: https://issues.apache.org/jira/browse/SPARK-2898
> Project: Spark
> Issue Type: Bug
> Components: PySpark
> Affects Versions: 1.1.0
> Reporter: Davies Liu