I am using PySpark through JupyterLab, with the Spark distribution provided
by *conda install pyspark*, so I am running Spark locally.

I started with pyspark 2.4.0, but I ran into a socket issue that I worked
around by downgrading the package to 2.3.2.

So I am using pyspark 2.3.2 at the moment.

I am trying to do an orderBy on one of my DataFrames, like this:

    dfRequestwithTime = dfRequestwithTime.orderBy('time', ascending=False)

but I get the error below. (I am sure this line is the cause, since without
it I get no error.)
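
As far as I understand, this should be equivalent to the explicit column
form, in case the ascending flag matters:

    from pyspark.sql import functions as F

    # should behave the same as orderBy('time', ascending=False)
    dfRequestwithTime = dfRequestwithTime.orderBy(F.col('time').desc())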

This DataFrame contains 2 columns:
- a column "request" containing strings
- a column "time" containing durations as integers

Here are the first two rows:

[Row(request='SELECT XXX FROM XXX WHERE XXX ', time=4),
 Row(request='SELECT XXX FROM XXX WHERE XXX ', time=1)]
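
For reference, a DataFrame with the same shape can be built like this
(illustrative values, assuming a standard SparkSession named "spark"):

    from pyspark.sql.types import (StructType, StructField,
                                   StringType, IntegerType)

    schema = StructType([
        StructField('request', StringType(), True),
        StructField('time', IntegerType(), True),
    ])
    dfExample = spark.createDataFrame(
        [('SELECT XXX FROM XXX WHERE XXX ', 4),
         ('SELECT XXX FROM XXX WHERE XXX ', 1)],
        schema)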

I extract the durations with a regex earlier in the notebook. I cast my
"time" column to integer and dropped the NaN rows from my dataset.
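
The extraction is done with a Python UDF; a simplified sketch of that step
(the real pattern differs, and the names dfRaw and "raw" are only examples)
looks like this:

    import re
    from pyspark.sql import functions as F
    from pyspark.sql.types import IntegerType

    # re.sub raises "TypeError: expected string or bytes-like object"
    # when it receives None, hence the guard.
    def extract_time(line):
        if line is None:
            return None
        digits = re.sub(r'[^0-9]', '', line)
        return int(digits) if digits else None

    extract_time_udf = F.udf(extract_time, IntegerType())

    dfRequestwithTime = (dfRaw
        .withColumn('time', extract_time_udf(F.col('raw')))
        .withColumn('time', F.col('time').cast('int'))
        .dropna(subset=['time']))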

I get the same error if I try to do an orderBy on my 'request' column
instead.



---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-461-17cd191c67c0> in <module>
----> 1 dfRequestwithTime.head(5)

~\Anaconda3\lib\site-packages\pyspark\sql\dataframe.py in head(self, n)
   1132             rs = self.head(1)
   1133             return rs[0] if rs else None
-> 1134         return self.take(n)
   1135 
   1136     @ignore_unicode_prefix

~\Anaconda3\lib\site-packages\pyspark\sql\dataframe.py in take(self, num)
    502         [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    503         """
--> 504         return self.limit(num).collect()
    505 
    506     @since(1.3)

~\Anaconda3\lib\site-packages\pyspark\sql\dataframe.py in collect(self)
    464         """
    465         with SCCallSiteSync(self._sc) as css:
--> 466             sock_info = self._jdf.collectToPython()
    467         return list(_load_from_socket(sock_info, BatchedSerializer(PickleSerializer())))
    468 

~\Anaconda3\lib\site-packages\py4j\java_gateway.py in __call__(self, *args)
   1255         answer = self.gateway_client.send_command(command)
   1256         return_value = get_return_value(
-> 1257             answer, self.gateway_client, self.target_id, self.name)
   1258 
   1259         for temp_arg in temp_args:

~\Anaconda3\lib\site-packages\pyspark\sql\utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

~\Anaconda3\lib\site-packages\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326                 raise Py4JJavaError(
    327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id, ".", name), value)
    329             else:
    330                 raise Py4JError(

Py4JJavaError: An error occurred while calling o5404.collectToPython.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 26
in stage 190.0 failed 1 times, most recent failure: Lost task 26.0 in stage
190.0 (TID 4856, localhost, executor driver):
org.apache.spark.api.python.PythonException: Traceback (most recent call
last):
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 253, in main
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 248, in process
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 331, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 140, in dump_stream
    for obj in iterator:
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 320, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 76, in <lambda>
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\util.py",
line 55, in wrapper
    return f(*args, **kwargs)
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 68, in <lambda>
  File "<ipython-input-454-e809ef90d785>", line 1, in <lambda>
  File "C:\Users\tryck\Anaconda3\lib\re.py", line 191, in sub
    return _compile(pattern, flags).sub(repl, string, count)
TypeError: expected string or bytes-like object

        at
org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:330)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:83)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:66)
        at
org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:284)
        at
org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
        at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.processNext(Unknown
Source)
        at
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
        at 
scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$class.foreach(Iterator.scala:891)
        at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
        at
org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:223)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.writeIteratorToStream(PythonUDFRunner.scala:52)
        at
org.apache.spark.api.python.BasePythonRunner$WriterThread$$anonfun$run$1.apply(PythonRunner.scala:247)
        at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1992)
        at
org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:170)

Driver stacktrace:
        at
org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1651)
        at
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1639)
        at
org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1638)
        at
scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
        at
org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1638)
        at
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
        at
org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
        at scala.Option.foreach(Option.scala:257)
        at
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
        at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1872)
        at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1821)
        at
org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1810)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
        at 
org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2034)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2131)
        at org.apache.spark.rdd.RDD$$anonfun$reduce$1.apply(RDD.scala:1035)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
        at org.apache.spark.rdd.RDD.reduce(RDD.scala:1017)
        at org.apache.spark.rdd.RDD$$anonfun$takeOrdered$1.apply(RDD.scala:1439)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
        at
org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
        at org.apache.spark.rdd.RDD.withScope(RDD.scala:363)
        at org.apache.spark.rdd.RDD.takeOrdered(RDD.scala:1426)
        at
org.apache.spark.sql.execution.TakeOrderedAndProjectExec.executeCollect(limit.scala:135)
        at
org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:3200)
        at
org.apache.spark.sql.Dataset$$anonfun$collectToPython$1.apply(Dataset.scala:3197)
        at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3259)
        at
org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3258)
        at org.apache.spark.sql.Dataset.collectToPython(Dataset.scala:3197)
        at sun.reflect.GeneratedMethodAccessor84.invoke(Unknown Source)
        at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
        at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
        at py4j.Gateway.invoke(Gateway.java:282)
        at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
        at py4j.commands.CallCommand.execute(CallCommand.java:79)
        at py4j.GatewayConnection.run(GatewayConnection.java:238)
        at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most
recent call last):
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 253, in main
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 248, in process
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 331, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 140, in dump_stream
    for obj in iterator:
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\serializers.py",
line 320, in _batched
    for item in iterator:
  File "<string>", line 1, in <lambda>
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 76, in <lambda>
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\util.py",
line 55, in wrapper
    return f(*args, **kwargs)
  File
"C:\Users\tryck\Anaconda3\Lib\site-packages\pyspark\python\lib\pyspark.zip\pyspark\worker.py",
line 68, in <lambda>
  File "<ipython-input-454-e809ef90d785>", line 1, in <lambda>
  File "C:\Users\tryck\Anaconda3\lib\re.py", line 191, in sub
    return _compile(pattern, flags).sub(repl, string, count)
TypeError: expected string or bytes-like object

        at
org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:330)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:83)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.read(PythonUDFRunner.scala:66)
        at
org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:284)
        at
org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
        at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at
org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.processNext(Unknown
Source)
        at
org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
        at
org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$GroupedIterator.fill(Iterator.scala:1124)
        at 
scala.collection.Iterator$GroupedIterator.hasNext(Iterator.scala:1130)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:409)
        at scala.collection.Iterator$class.foreach(Iterator.scala:891)
        at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
        at
org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:223)
        at
org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$2.writeIteratorToStream(PythonUDFRunner.scala:52)
        at
org.apache.spark.api.python.BasePythonRunner$WriterThread$$anonfun$run$1.apply(PythonRunner.scala:247)
        at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1992)
        at
org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:170)







