[ 
https://issues.apache.org/jira/browse/SPARK-17579?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15501927#comment-15501927
 ] 

Jianfei Wang commented on SPARK-17579:
--------------------------------------

{code}
16/09/19 08:49:41 INFO TaskSetManager: Starting task 0.0 in stage 1.0 (TID 1, 
Slave4, partition 1, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:41 INFO TaskSetManager: Starting task 1.0 in stage 1.0 (TID 2, 
Slave5, partition 2, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:41 INFO TaskSetManager: Starting task 2.0 in stage 1.0 (TID 3, 
Slave4, partition 3, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:41 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
1 on executor id: 2 hostname: Slave4.
16/09/19 08:49:41 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
3 on executor id: 2 hostname: Slave4.
16/09/19 08:49:41 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
2 on executor id: 1 hostname: Slave5.
16/09/19 08:49:41 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 
Slave4:60202 (size: 3.4 KB, free: 912.3 MB)
16/09/19 08:49:41 WARN TaskSetManager: Lost task 0.0 in stage 1.0 (TID 1, 
Slave4): java.lang.NoClassDefFoundError: Could not initialize class 
bench.mllib.wjf.Main$
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
 Source)
        at 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at 
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

16/09/19 08:49:41 INFO TaskSetManager: Starting task 0.1 in stage 1.0 (TID 4, 
Slave4, partition 1, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:41 WARN TaskSetManager: Lost task 2.0 in stage 1.0 (TID 3, 
Slave4): java.lang.ExceptionInInitializerError
        at bench.mllib.wjf.Main$.<init>(Test.scala:12)
        at bench.mllib.wjf.Main$.<clinit>(Test.scala)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
 Source)
        at 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at 
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
Caused by: org.apache.spark.SparkException: A master URL must be set in your 
configuration
        at org.apache.spark.SparkContext.<init>(SparkContext.scala:371)
        at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2256)
        at 
org.apache.spark.sql.SparkSession$Builder$$anonfun$8.apply(SparkSession.scala:831)
        at 
org.apache.spark.sql.SparkSession$Builder$$anonfun$8.apply(SparkSession.scala:823)
        at scala.Option.getOrElse(Option.scala:121)
        at 
org.apache.spark.sql.SparkSession$Builder.getOrCreate(SparkSession.scala:823)
        at bench.mllib.wjf.Env$.<init>(Test.scala:5)
        at bench.mllib.wjf.Env$.<clinit>(Test.scala)
        ... 20 more

16/09/19 08:49:41 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
4 on executor id: 2 hostname: Slave4.
16/09/19 08:49:41 INFO TaskSetManager: Starting task 2.1 in stage 1.0 (TID 5, 
Slave5, partition 3, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:41 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
5 on executor id: 1 hostname: Slave5.
16/09/19 08:49:42 WARN TransportChannelHandler: Exception in connection from 
/133.133.134.94:48971
java.io.IOException: Connection reset by peer
        at sun.nio.ch.FileDispatcherImpl.read0(Native Method)
        at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39)
        at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
        at sun.nio.ch.IOUtil.read(IOUtil.java:192)
        at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:384)
        at 
io.netty.buffer.PooledUnsafeDirectByteBuf.setBytes(PooledUnsafeDirectByteBuf.java:313)
        at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:881)
        at 
io.netty.channel.socket.nio.NioSocketChannel.doReadBytes(NioSocketChannel.java:242)
        at 
io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:119)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:511)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:468)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:382)
        at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:354)
        at 
io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:111)
        at java.lang.Thread.run(Thread.java:745)
16/09/19 08:49:42 INFO YarnSchedulerBackend$YarnDriverEndpoint: Disabling 
executor 2.
16/09/19 08:49:42 INFO DAGScheduler: Executor lost: 2 (epoch 0)
16/09/19 08:49:42 INFO BlockManagerMasterEndpoint: Trying to remove executor 2 
from BlockManagerMaster.
16/09/19 08:49:42 INFO BlockManagerMasterEndpoint: Removing block manager 
BlockManagerId(2, Slave4, 60202)
16/09/19 08:49:42 INFO BlockManagerMaster: Removed 2 successfully in 
removeExecutor
16/09/19 08:49:42 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Container 
marked as failed: container_1473411956092_0087_01_000003 on host: Slave4. Exit 
status: 50. Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000003
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:42 ERROR YarnScheduler: Lost executor 2 on Slave4: Container 
marked as failed: container_1473411956092_0087_01_000003 on host: Slave4. Exit 
status: 50. Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000003
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:42 WARN TaskSetManager: Lost task 0.1 in stage 1.0 (TID 4, 
Slave4): ExecutorLostFailure (executor 2 exited caused by one of the running 
tasks) Reason: Container marked as failed: 
container_1473411956092_0087_01_000003 on host: Slave4. Exit status: 50. 
Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000003
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:42 INFO BlockManagerMaster: Removal of executor 2 requested
16/09/19 08:49:42 INFO YarnSchedulerBackend$YarnDriverEndpoint: Asked to remove 
non-existent executor 2
16/09/19 08:49:42 INFO BlockManagerMasterEndpoint: Trying to remove executor 2 
from BlockManagerMaster.
16/09/19 08:49:45 INFO BlockManagerInfo: Removed broadcast_0_piece0 on 
133.133.134.119:49063 in memory (size: 3.4 KB, free: 912.3 MB)
16/09/19 08:49:45 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 
Slave5:43647 (size: 3.4 KB, free: 912.3 MB)
16/09/19 08:49:46 INFO TaskSetManager: Starting task 0.2 in stage 1.0 (TID 6, 
Slave5, partition 1, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:46 INFO TaskSetManager: Lost task 1.0 in stage 1.0 (TID 2) on 
executor Slave5: java.lang.ExceptionInInitializerError (null) [duplicate 1]
16/09/19 08:49:46 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
6 on executor id: 1 hostname: Slave5.
16/09/19 08:49:46 INFO TaskSetManager: Starting task 1.1 in stage 1.0 (TID 7, 
Slave5, partition 2, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:46 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
7 on executor id: 1 hostname: Slave5.
16/09/19 08:49:46 INFO TaskSetManager: Lost task 2.1 in stage 1.0 (TID 5) on 
executor Slave5: java.lang.NoClassDefFoundError (Could not initialize class 
bench.mllib.wjf.Main$) [duplicate 1]
16/09/19 08:49:46 WARN TransportChannelHandler: Exception in connection from 
/133.133.134.143:49591
java.io.IOException: Connection reset by peer
        at sun.nio.ch.FileDispatcherImpl.read0(Native Method)
        at sun.nio.ch.SocketDispatcher.read(SocketDispatcher.java:39)
        at sun.nio.ch.IOUtil.readIntoNativeBuffer(IOUtil.java:223)
        at sun.nio.ch.IOUtil.read(IOUtil.java:192)
        at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:384)
        at 
io.netty.buffer.PooledUnsafeDirectByteBuf.setBytes(PooledUnsafeDirectByteBuf.java:313)
        at io.netty.buffer.AbstractByteBuf.writeBytes(AbstractByteBuf.java:881)
        at 
io.netty.channel.socket.nio.NioSocketChannel.doReadBytes(NioSocketChannel.java:242)
        at 
io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:119)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:511)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:468)
        at 
io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:382)
        at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:354)
        at 
io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:111)
        at java.lang.Thread.run(Thread.java:745)
16/09/19 08:49:46 INFO YarnSchedulerBackend$YarnDriverEndpoint: Disabling 
executor 1.
16/09/19 08:49:46 INFO DAGScheduler: Executor lost: 1 (epoch 1)
16/09/19 08:49:46 INFO BlockManagerMasterEndpoint: Trying to remove executor 1 
from BlockManagerMaster.
16/09/19 08:49:46 INFO BlockManagerMasterEndpoint: Removing block manager 
BlockManagerId(1, Slave5, 43647)
16/09/19 08:49:46 INFO BlockManagerMaster: Removed 1 successfully in 
removeExecutor
16/09/19 08:49:47 ERROR YarnScheduler: Lost executor 1 on Slave5: Container 
marked as failed: container_1473411956092_0087_01_000002 on host: Slave5. Exit 
status: 50. Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000002
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:47 WARN YarnSchedulerBackend$YarnSchedulerEndpoint: Container 
marked as failed: container_1473411956092_0087_01_000002 on host: Slave5. Exit 
status: 50. Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000002
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:47 WARN TaskSetManager: Lost task 1.1 in stage 1.0 (TID 7, 
Slave5): ExecutorLostFailure (executor 1 exited caused by one of the running 
tasks) Reason: Container marked as failed: 
container_1473411956092_0087_01_000002 on host: Slave5. Exit status: 50. 
Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000002
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:47 WARN TaskSetManager: Lost task 0.2 in stage 1.0 (TID 6, 
Slave5): ExecutorLostFailure (executor 1 exited caused by one of the running 
tasks) Reason: Container marked as failed: 
container_1473411956092_0087_01_000002 on host: Slave5. Exit status: 50. 
Diagnostics: Exception from container-launch.
Container id: container_1473411956092_0087_01_000002
Exit code: 50
Stack trace: ExitCodeException exitCode=50: 
        at org.apache.hadoop.util.Shell.runCommand(Shell.java:545)
        at org.apache.hadoop.util.Shell.run(Shell.java:456)
        at 
org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:722)
        at 
org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:211)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:302)
        at 
org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:82)
        at java.util.concurrent.FutureTask.run(FutureTask.java:262)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)


Container exited with a non-zero exit code 50

16/09/19 08:49:47 INFO BlockManagerMaster: Removal of executor 1 requested
16/09/19 08:49:47 INFO YarnSchedulerBackend$YarnDriverEndpoint: Asked to remove 
non-existent executor 1
16/09/19 08:49:47 INFO BlockManagerMasterEndpoint: Trying to remove executor 1 
from BlockManagerMaster.
16/09/19 08:49:49 INFO YarnSchedulerBackend$YarnDriverEndpoint: Registered 
executor NettyRpcEndpointRef(null) (133.133.134.121:59054) with ID 3
16/09/19 08:49:49 INFO TaskSetManager: Starting task 0.3 in stage 1.0 (TID 8, 
Slave1, partition 1, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:49 INFO TaskSetManager: Starting task 1.2 in stage 1.0 (TID 9, 
Slave1, partition 2, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:49 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
8 on executor id: 3 hostname: Slave1.
16/09/19 08:49:49 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
9 on executor id: 3 hostname: Slave1.
16/09/19 08:49:49 INFO BlockManagerMasterEndpoint: Registering block manager 
Slave1:41199 with 912.3 MB RAM, BlockManagerId(3, Slave1, 41199)
16/09/19 08:49:53 INFO BlockManagerInfo: Added broadcast_1_piece0 in memory on 
Slave1:41199 (size: 3.4 KB, free: 912.3 MB)
16/09/19 08:49:53 INFO TaskSetManager: Starting task 2.2 in stage 1.0 (TID 10, 
Slave1, partition 3, PROCESS_LOCAL, 5424 bytes)
16/09/19 08:49:53 INFO YarnSchedulerBackend$YarnDriverEndpoint: Launching task 
10 on executor id: 3 hostname: Slave1.
16/09/19 08:49:53 WARN TaskSetManager: Lost task 0.3 in stage 1.0 (TID 8, 
Slave1): java.lang.NoClassDefFoundError: Could not initialize class 
bench.mllib.wjf.Main$
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
 Source)
        at 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at 
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

16/09/19 08:49:53 ERROR TaskSetManager: Task 0 in stage 1.0 failed 4 times; 
aborting job
16/09/19 08:49:53 INFO YarnScheduler: Cancelling stage 1
16/09/19 08:49:53 INFO YarnScheduler: Stage 1 was cancelled
16/09/19 08:49:53 INFO DAGScheduler: ResultStage 1 (show at Test.scala:15) 
failed in 12.514 s
16/09/19 08:49:53 INFO DAGScheduler: Job 1 failed: show at Test.scala:15, took 
12.548695 s
Exception in thread "main" org.apache.spark.SparkException: Job aborted due to 
stage failure: Task 0 in stage 1.0 failed 4 times, most recent failure: Lost 
task 0.3 in stage 1.0 (TID 8, Slave1): java.lang.NoClassDefFoundError: Could 
not initialize class bench.mllib.wjf.Main$
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
 Source)
        at 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at 
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

Driver stacktrace:
        at 
org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1450)
        at 
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1438)
        at 
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1437)
        at 
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
        at 
org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1437)
        at 
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
        at 
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:811)
        at scala.Option.foreach(Option.scala:257)
        at 
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:811)
        at 
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1659)
        at 
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1618)
        at 
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1607)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
        at 
org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:632)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1871)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1884)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:1897)
        at 
org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:347)
        at 
org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:39)
        at 
org.apache.spark.sql.Dataset$$anonfun$org$apache$spark$sql$Dataset$$execute$1$1.apply(Dataset.scala:2183)
        at 
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:57)
        at org.apache.spark.sql.Dataset.withNewExecutionId(Dataset.scala:2532)
        at 
org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$execute$1(Dataset.scala:2182)
        at 
org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collect(Dataset.scala:2189)
        at 
org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1925)
        at 
org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:1924)
        at org.apache.spark.sql.Dataset.withTypedCallback(Dataset.scala:2562)
        at org.apache.spark.sql.Dataset.head(Dataset.scala:1924)
        at org.apache.spark.sql.Dataset.take(Dataset.scala:2139)
        at org.apache.spark.sql.Dataset.showString(Dataset.scala:239)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:526)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:486)
        at org.apache.spark.sql.Dataset.show(Dataset.scala:495)
        at bench.mllib.wjf.Main$.main(Test.scala:15)
        at bench.mllib.wjf.Main.main(Test.scala)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:606)
        at 
org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:729)
        at 
org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:185)
        at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:210)
        at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:124)
        at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.NoClassDefFoundError: Could not initialize class 
bench.mllib.wjf.Main$
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at bench.mllib.wjf.Main$$anonfun$main$1.apply(Test.scala:15)
        at 
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
 Source)
        at 
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at 
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:246)
        at 
org.apache.spark.sql.execution.SparkPlan$$anonfun$4.apply(SparkPlan.scala:240)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:784)
        at 
org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:283)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70)
        at org.apache.spark.scheduler.Task.run(Task.scala:85)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:274)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
16/09/19 08:49:53 INFO SparkContext: Invoking stop() from shutdown hook
16/09/19 08:49:53 INFO SparkUI: Stopped Spark web UI at 
http://133.133.134.119:4041
16/09/19 08:49:54 INFO YarnClientSchedulerBackend: Interrupting monitor thread
16/09/19 08:49:54 INFO YarnClientSchedulerBackend: Shutting down all executors
16/09/19 08:49:54 INFO YarnSchedulerBackend$YarnDriverEndpoint: Asking each 
executor to shut down
16/09/19 08:49:54 INFO SchedulerExtensionServices: Stopping 
SchedulerExtensionServices
(serviceOption=None,
 services=List(),
 started=false)
16/09/19 08:49:54 INFO YarnClientSchedulerBackend: Stopped
16/09/19 08:49:54 INFO MapOutputTrackerMasterEndpoint: 
MapOutputTrackerMasterEndpoint stopped!
16/09/19 08:49:54 INFO MemoryStore: MemoryStore cleared
16/09/19 08:49:54 INFO BlockManager: BlockManager stopped
16/09/19 08:49:54 INFO BlockManagerMaster: BlockManagerMaster stopped
16/09/19 08:49:54 INFO OutputCommitCoordinator$OutputCommitCoordinatorEndpoint: 
OutputCommitCoordinator stopped!
16/09/19 08:49:54 INFO SparkContext: Successfully stopped SparkContext
16/09/19 08:49:54 INFO ShutdownHookManager: Shutdown hook called
16/09/19 08:49:54 INFO ShutdownHookManager: Deleting directory 
/tmp/spark-ed3961bc-7e31-43d7-a827-553874e6180f

{code}

> Exception When the Main object extends Encoder in cluster mode but ok in 
> local mode
> -----------------------------------------------------------------------------------
>
>                 Key: SPARK-17579
>                 URL: https://issues.apache.org/jira/browse/SPARK-17579
>             Project: Spark
>          Issue Type: Bug
>          Components: Spark Core, SQL
>    Affects Versions: 2.0.0
>            Reporter: Jianfei Wang
>
> This is the code below: I got an exception in cluster mode, but it works in 
> local mode.
> Besides, if I remove the extends clause from the Main object, it works fine in 
> cluster mode.
> Why is this?
> {code}
> import org.apache.spark.sql._
> object Env {
>   val spark = SparkSession.builder.getOrCreate()
> }
> import Env.spark.implicits._
> abstract class A[T : Encoder] {}
> object Main extends A[String] {
>   def func(str:String):String = str
>   def main(args: Array[String]): Unit = {
>     Env.spark.createDataset(Seq("a","b","c")).map(func).show()
>   }
> }
> {code}
> I got the exception below:
> Caused by: java.lang.NoClassDefFoundError: Could not initialize class Main$ 
> at Main$$anonfun$main$1.apply(test.scala:14) 
> at Main$$anonfun$main$1.apply(test.scala:14) 
> at 
> org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown
>  
> Source) 
> at 
> org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>  
> at 
> org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:370)
>  
> at 
> org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:231)
>  
> at 
> org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:225)
>  
> at 
> org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803)
>  
> at 
> org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$24.apply(RDD.scala:803)
>  
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38) 
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:319) 
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:283) 
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:70) 
> at org.apache.spark.scheduler.Task.run(Task.scala:86) 
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:277) 
> at 
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>  
> at 
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>  
> at java.lang.Thread.run(Thread.java:745) 



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscr...@spark.apache.org
For additional commands, e-mail: issues-h...@spark.apache.org

Reply via email to