Yaniv Kempler created SPARK-32081:
-------------------------------------

             Summary: "Invalid UTF-32 character" error on v2.4.5 running pyspark
                 Key: SPARK-32081
                 URL: https://issues.apache.org/jira/browse/SPARK-32081
             Project: Spark
          Issue Type: Bug
          Components: EC2
    Affects Versions: 2.4.5
            Reporter: Yaniv Kempler


Facing an "Invalid UTF-32 character" error (java.io.CharConversionException) while reading JSON files with PySpark; full traceback below.
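
Roughly the shape of the call that hits this (the path and app name below are placeholders, not my actual job; the failure happens during schema inference inside read.json, before any action is run on the DataFrame):

    from pyspark.sql import SparkSession

    spark = SparkSession.builder.appName("utf32-repro").getOrCreate()

    # Schema inference scans the input files; if Jackson's charset
    # auto-detection decides a file is UTF-32, parsing fails with
    # java.io.CharConversionException before a DataFrame is returned.
    df = spark.read.json("s3://some-bucket/path/to/json/")  # placeholder path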

 

Py4JJavaError                             Traceback (most recent call last)
<timed exec> in <module>

~/.local/lib/python3.6/site-packages/pyspark/sql/readwriter.py in json(self, path, schema, primitivesAsString, prefersDecimal, allowComments, allowUnquotedFieldNames, allowSingleQuotes, allowNumericLeadingZero, allowBackslashEscapingAnyCharacter, mode, columnNameOfCorruptRecord, dateFormat, timestampFormat, multiLine, allowUnquotedControlChars, lineSep, samplingRatio, dropFieldIfAllNull, encoding)
    284             keyed._bypass_serializer = True
    285             jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
--> 286             return self._df(self._jreader.json(jrdd))
    287         else:
    288             raise TypeError("path can be only string, list or RDD")

~/.local/lib/python3.6/site-packages/py4j/java_gateway.py in __call__(self, *args)
   1255         answer = self.gateway_client.send_command(command)
   1256         return_value = get_return_value(
-> 1257             answer, self.gateway_client, self.target_id, self.name)
   1258
   1259         for temp_arg in temp_args:

~/.local/lib/python3.6/site-packages/pyspark/sql/utils.py in deco(*a, **kw)
     61     def deco(*a, **kw):
     62         try:
---> 63             return f(*a, **kw)
     64         except py4j.protocol.Py4JJavaError as e:
     65             s = e.java_exception.toString()

~/.local/lib/python3.6/site-packages/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    326                 raise Py4JJavaError(
    327                     "An error occurred while calling {0}{1}{2}.\n".
--> 328                     format(target_id, ".", name), value)
    329             else:
    330                 raise Py4JError(
Py4JJavaError: An error occurred while calling o67.json.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 546 in stage 0.0 failed 4 times, most recent failure: Lost task 546.3 in stage 0.0 (TID 642, 172.31.30.196, executor 1): java.io.CharConversionException: Invalid UTF-32 character 0x1000000(above 10ffff) at char #206, byte #827)
    at com.fasterxml.jackson.core.io.UTF32Reader.reportInvalid(UTF32Reader.java:189)
    at com.fasterxml.jackson.core.io.UTF32Reader.read(UTF32Reader.java:150)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser.loadMore(ReaderBasedJsonParser.java:153)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser._skipWSOrEnd(ReaderBasedJsonParser.java:2017)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser.nextToken(ReaderBasedJsonParser.java:577)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1$$anonfun$apply$3.apply(JsonInferSchema.scala:56)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1$$anonfun$apply$3.apply(JsonInferSchema.scala:55)
    at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2543)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1.apply(JsonInferSchema.scala:55)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1.apply(JsonInferSchema.scala:53)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$class.foreach(Iterator.scala:891)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceLeft(TraversableOnce.scala:185)
    at scala.collection.AbstractIterator.reduceLeft(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceLeftOption(TraversableOnce.scala:203)
    at scala.collection.AbstractIterator.reduceLeftOption(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceOption(TraversableOnce.scala:210)
    at scala.collection.AbstractIterator.reduceOption(Iterator.scala:1334)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1.apply(JsonInferSchema.scala:70)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1.apply(JsonInferSchema.scala:50)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.base/java.lang.Thread.run(Thread.java:834)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1891)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1879)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1878)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1878)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:927)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:927)
    at scala.Option.foreach(Option.scala:257)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:927)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2112)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2061)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2050)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:738)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2158)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$.infer(JsonInferSchema.scala:83)
    at org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource$$anonfun$inferFromDataset$1.apply(JsonDataSource.scala:109)
    at org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource$$anonfun$inferFromDataset$1.apply(JsonDataSource.scala:109)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
    at org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource$.inferFromDataset(JsonDataSource.scala:108)
    at org.apache.spark.sql.DataFrameReader$$anonfun$2.apply(DataFrameReader.scala:440)
    at org.apache.spark.sql.DataFrameReader$$anonfun$2.apply(DataFrameReader.scala:440)
    at scala.Option.getOrElse(Option.scala:121)
    at org.apache.spark.sql.DataFrameReader.json(DataFrameReader.scala:439)
    at org.apache.spark.sql.DataFrameReader.json(DataFrameReader.scala:420)
    at org.apache.spark.sql.DataFrameReader.json(DataFrameReader.scala:406)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
    at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
    at py4j.Gateway.invoke(Gateway.java:282)
    at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
    at py4j.commands.CallCommand.execute(CallCommand.java:79)
    at py4j.GatewayConnection.run(GatewayConnection.java:238)
    at java.lang.Thread.run(Thread.java:748)
Caused by: java.io.CharConversionException: Invalid UTF-32 character 0x1000000(above 10ffff) at char #206, byte #827)
    at com.fasterxml.jackson.core.io.UTF32Reader.reportInvalid(UTF32Reader.java:189)
    at com.fasterxml.jackson.core.io.UTF32Reader.read(UTF32Reader.java:150)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser.loadMore(ReaderBasedJsonParser.java:153)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser._skipWSOrEnd(ReaderBasedJsonParser.java:2017)
    at com.fasterxml.jackson.core.json.ReaderBasedJsonParser.nextToken(ReaderBasedJsonParser.java:577)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1$$anonfun$apply$3.apply(JsonInferSchema.scala:56)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1$$anonfun$apply$3.apply(JsonInferSchema.scala:55)
    at org.apache.spark.util.Utils$.tryWithResource(Utils.scala:2543)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1.apply(JsonInferSchema.scala:55)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1$$anonfun$apply$1.apply(JsonInferSchema.scala:53)
    at scala.collection.Iterator$$anon$12.nextCur(Iterator.scala:435)
    at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:441)
    at scala.collection.Iterator$class.foreach(Iterator.scala:891)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceLeft(TraversableOnce.scala:185)
    at scala.collection.AbstractIterator.reduceLeft(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceLeftOption(TraversableOnce.scala:203)
    at scala.collection.AbstractIterator.reduceLeftOption(Iterator.scala:1334)
    at scala.collection.TraversableOnce$class.reduceOption(TraversableOnce.scala:210)
    at scala.collection.AbstractIterator.reduceOption(Iterator.scala:1334)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1.apply(JsonInferSchema.scala:70)
    at org.apache.spark.sql.catalyst.json.JsonInferSchema$$anonfun$1.apply(JsonInferSchema.scala:50)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$23.apply(RDD.scala:823)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:346)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:310)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:123)
    at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
    at java.lang.Thread.run(Thread.java:834)
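
A workaround sketch I am considering (assuming the files are actually UTF-8 and the bad characters come from charset auto-detection during schema inference; the schema and path below are placeholders, not my actual data):

    from pyspark.sql import SparkSession
    from pyspark.sql.types import StructType, StructField, StringType

    spark = SparkSession.builder.getOrCreate()

    # Supplying an explicit schema skips the inference job that fails above;
    # pinning the encoding avoids Jackson's UTF-32 auto-detection.
    schema = StructType([StructField("example_field", StringType(), True)])

    df = (spark.read
          .schema(schema)                  # skip schema inference
          .option("encoding", "UTF-8")     # force charset, no auto-detection
          .option("mode", "PERMISSIVE")    # keep malformed records as nulls
          .json("s3://some-bucket/path/to/json/"))  # placeholder path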


