HyukjinKwon commented on a change in pull request #28661:
URL: https://github.com/apache/spark/pull/28661#discussion_r431634444



##########
File path: python/pyspark/sql/utils.py
##########
@@ -75,21 +96,29 @@ class UnknownException(CapturedException):
 
 def convert_exception(e):
     s = e.toString()
-    stackTrace = '\n\t at '.join(map(lambda x: x.toString(), e.getStackTrace()))
     c = e.getCause()
+
+    jvm = SparkContext._jvm
+    jwriter = jvm.java.io.StringWriter()
+    e.printStackTrace(jvm.java.io.PrintWriter(jwriter))
+    stacktrace = jwriter.toString()

Review comment:
       Seems different. This is what I get from `getStackTrace`:
   
   ```
   
org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
          at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
          at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
          at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
          at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
          at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
          at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
          at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
          at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
          at scala.Option.foreach(Option.scala:407)
          at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
          at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
          at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
          at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
          at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
          at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
          at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
          at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
          at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
          at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:467)
          at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:420)
          at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:47)
          at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3653)
          at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2695)
          at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3644)
          at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
          at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
          at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
          at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
          at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
          at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3642)
          at org.apache.spark.sql.Dataset.head(Dataset.scala:2695)
          at org.apache.spark.sql.Dataset.take(Dataset.scala:2902)
          at org.apache.spark.sql.Dataset.getRows(Dataset.scala:300)
          at org.apache.spark.sql.Dataset.showString(Dataset.scala:337)
          at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
          at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
          at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
          at java.lang.reflect.Method.invoke(Method.java:498)
          at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
          at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
          at py4j.Gateway.invoke(Gateway.java:282)
          at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
          at py4j.commands.CallCommand.execute(CallCommand.java:79)
          at py4j.GatewayConnection.run(GatewayConnection.java:238)
          at java.lang.Thread.run(Thread.java:748)
   ```
   
    And this is what I get from `printStackTrace`:
   
   ```
    org.apache.spark.SparkException: Job aborted due to stage failure: Task 10 in stage 2.0 failed 4 times, most recent failure: Lost task 10.3 in stage 2.0 (TID 18, 192.168.35.193, executor 2): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 605, in main
        process()
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 597, in process
        serializer.dump_stream(out_iter, outfile)
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 223, in dump_stream
        self.serializer.dump_stream(self._batched(iterator), stream)
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
        for obj in iterator:
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 212, in _batched
        for item in iterator:
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 450, in mapper
        result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 450, in <genexpr>
        result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 90, in <lambda>
        return lambda *a: f(*a)
      File "/.../spark/python/lib/pyspark.zip/pyspark/util.py", line 107, in wrapper
        return f(*args, **kwargs)
      File "<stdin>", line 3, in divide_by_zero
    ZeroDivisionError: division by zero

         at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:516)
         at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.read(PythonUDFRunner.scala:81)
         at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.read(PythonUDFRunner.scala:64)
         at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:469)
         at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
         at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:489)
         at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
         at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
         at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
         at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
         at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
         at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
         at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
         at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
         at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
         at org.apache.spark.scheduler.Task.run(Task.scala:127)
         at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:469)
         at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:472)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
         at java.lang.Thread.run(Thread.java:748)

    Driver stacktrace:
         at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2117)
         at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2066)
         at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2065)
         at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
         at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
         at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
         at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2065)
         at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1021)
         at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1021)
         at scala.Option.foreach(Option.scala:407)
         at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1021)
         at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2297)
         at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2246)
         at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2235)
         at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
         at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:823)
         at org.apache.spark.SparkContext.runJob(SparkContext.scala:2108)
         at org.apache.spark.SparkContext.runJob(SparkContext.scala:2129)
         at org.apache.spark.SparkContext.runJob(SparkContext.scala:2148)
         at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:467)
         at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:420)
         at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:47)
         at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3653)
         at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2695)
         at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3644)
         at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)
         at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)
         at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)
         at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
         at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
         at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3642)
         at org.apache.spark.sql.Dataset.head(Dataset.scala:2695)
         at org.apache.spark.sql.Dataset.take(Dataset.scala:2902)
         at org.apache.spark.sql.Dataset.getRows(Dataset.scala:300)
         at org.apache.spark.sql.Dataset.showString(Dataset.scala:337)
         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
         at java.lang.reflect.Method.invoke(Method.java:498)
         at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
         at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
         at py4j.Gateway.invoke(Gateway.java:282)
         at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
         at py4j.commands.CallCommand.execute(CallCommand.java:79)
         at py4j.GatewayConnection.run(GatewayConnection.java:238)
         at java.lang.Thread.run(Thread.java:748)
    Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 605, in main
        process()
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 597, in process
        serializer.dump_stream(out_iter, outfile)
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 223, in dump_stream
        self.serializer.dump_stream(self._batched(iterator), stream)
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 141, in dump_stream
        for obj in iterator:
      File "/.../spark/python/lib/pyspark.zip/pyspark/serializers.py", line 212, in _batched
        for item in iterator:
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 450, in mapper
        result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 450, in <genexpr>
        result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
      File "/.../spark/python/lib/pyspark.zip/pyspark/worker.py", line 90, in <lambda>
        return lambda *a: f(*a)
      File "/.../spark/python/lib/pyspark.zip/pyspark/util.py", line 107, in wrapper
        return f(*args, **kwargs)
      File "<stdin>", line 3, in divide_by_zero
    ZeroDivisionError: division by zero

         at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:516)
         at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.read(PythonUDFRunner.scala:81)
         at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.read(PythonUDFRunner.scala:64)
         at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:469)
         at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
         at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:489)
         at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
         at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:458)
         at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
         at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
         at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:753)
         at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:340)
         at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:898)
         at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:898)
         at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:373)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:337)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
         at org.apache.spark.scheduler.Task.run(Task.scala:127)
         at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:469)
         at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:472)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
         ... 1 more
   ```
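    
    For reference, a minimal sketch contrasting the two approaches from the diff above (the helper names are mine, purely illustrative; `e` is assumed to be the captured Java `Throwable` that `convert_exception` receives). The difference in the outputs comes from `printStackTrace` rendering the exception message and the full `Caused by:` chain, including the Python worker traceback, whereas `getStackTrace` returns only the frames of the top-level exception:
    
    ```python
    from pyspark import SparkContext
    
    def stacktrace_via_get_stack_trace(e):
        # Old approach: join only the frames of `e` itself; the exception
        # message and nested causes are not included.
        return '\n\t at '.join(map(lambda x: x.toString(), e.getStackTrace()))
    
    def stacktrace_via_print_stack_trace(e):
        # New approach from the diff: let the JVM render the complete trace,
        # message and "Caused by:" chain included, into a StringWriter.
        jvm = SparkContext._jvm
        jwriter = jvm.java.io.StringWriter()
        e.printStackTrace(jvm.java.io.PrintWriter(jwriter))
        return jwriter.toString()
    ```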
   



