[ https://issues.apache.org/jira/browse/SPARK-12695?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
eugeny birukov updated SPARK-12695:
-----------------------------------
Description:
I process a Kinesis stream in Python:

from pyspark.sql import SQLContext
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream

def process(time, rdd):
    sqlContext = SQLContext.getOrCreate(rdd.context)
    t = sqlContext.read.json(rdd.map(lambda line: str(line)))

stream = KinesisUtils.createStream(ssc, appName, streamName, endpointUrl,
                                   regionName, InitialPositionInStream.LATEST, 30)
stream.map(lambda line: str(line)).foreachRDD(process)
Running this raises the following exception:
org.apache.spark.SparkException: An exception was raised by Python:
Traceback (most recent call last):
  File "/usr/local/spark/python/lib/pyspark.zip/pyspark/streaming/util.py", line 65, in call
    r = self.func(t, *rdds)
  File "/usr/local/spark-1.6.0-bin-hadoop2.4/bin/kinesis_test.py", line 26, in process
    t = sqlContext.read.json(rdd.map(lambda line: str(line)))
  File "/usr/local/spark/python/lib/pyspark.zip/pyspark/sql/readwriter.py", line 180, in json
    return self._df(self._jreader.json(path._jrdd))
  File "/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/java_gateway.py", line 813, in __call__
    answer, self.gateway_client, self.target_id, self.name)
  File "/usr/local/spark/python/lib/pyspark.zip/pyspark/sql/utils.py", line 45, in deco
    return f(*a, **kw)
  File "/usr/local/spark/python/lib/py4j-0.9-src.zip/py4j/protocol.py", line 308, in get_return_value
    format(target_id, ".", name), value)
Py4JJavaError: An error occurred while calling o165.json.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 1.0 failed 1 times, most recent failure: Lost task 0.0 in stage 1.0 (TID 1, localhost): java.lang.ClassCastException: [B cannot be cast to java.lang.String
    at org.apache.spark.sql.execution.datasources.json.InferSchema$$anonfun$1$$anonfun$apply$1.apply(InferSchema.scala:53)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:144)
    at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:201)
    at scala.collection.AbstractIterator.aggregate(Iterator.scala:1157)
    at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1121)
    at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$23.apply(RDD.scala:1121)
    at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1122)
    at org.apache.spark.rdd.RDD$$anonfun$treeAggregate$1$$anonfun$24.apply(RDD.scala:1122)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$20.apply(RDD.scala:710)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$20.apply(RDD.scala:710)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:306)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:270)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
    at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
    ....
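In the JVM, [B denotes byte[]: the trace suggests the RDD that PySpark hands to DataFrameReader.json is backed by an RDD of pickled byte arrays, while InferSchema expects an RDD of Strings, hence the cast failure. Until that is fixed, one way to sidestep the JVM JSON reader is to parse the records in Python and build the DataFrame from Row objects. This is only a sketch of that workaround, not the fix; it reuses the process callback from the snippet above and assumes each record is a single JSON object:

import json
from pyspark.sql import Row, SQLContext

# Workaround sketch: parse JSON in Python so that only plain Rows cross
# the Py4J boundary instead of pickled byte arrays.
def process(time, rdd):
    sqlContext = SQLContext.getOrCreate(rdd.context)
    if rdd.isEmpty():  # streaming batches may be empty
        return
    rows = rdd.map(lambda line: Row(**json.loads(str(line))))
    df = sqlContext.createDataFrame(rows)
    df.registerTempTable("jtable")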
In Java, the same code runs without exceptions; there the map yields a JavaRDD<String> on the JVM side, so DataFrameReader.json receives Strings rather than byte arrays:
streams.map(x -> new String(x))
    .foreachRDD((JavaRDD<String> rdd) -> {
        SQLContext sqlContext = SQLContext.getOrCreate(rdd.context());
        sqlContext.read().json(rdd).registerTempTable("jtable");
        DataFrame df = sqlContext.sql(
            "select count(appName), appName, type from jtable"
            + " group by appName, type order by appName");
        df.show(200);
    });
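If parsing in Python is undesirable, the deprecated SQLContext.jsonRDD helper (still shipped in PySpark 1.6, removed in 2.0) appears to perform the bytes-to-String conversion on the JVM side itself before schema inference, so it may avoid this cast. An untested sketch under that assumption:

# Untested alternative sketch: jsonRDD is deprecated since Spark 1.4 but
# still available in 1.6; unlike read.json(rdd), it converts the records
# to JVM Strings before inferring the schema.
def process(time, rdd):
    sqlContext = SQLContext.getOrCreate(rdd.context)
    if not rdd.isEmpty():
        df = sqlContext.jsonRDD(rdd.map(lambda line: str(line)))
        df.show()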
> java.lang.ClassCastException: [B cannot be cast to java.lang.String
> -------------------------------------------------------------------
>
> Key: SPARK-12695
> URL: https://issues.apache.org/jira/browse/SPARK-12695
> Project: Spark
> Issue Type: Bug
> Components: PySpark
> Affects Versions: 1.6.0
> Reporter: eugeny birukov
> Attachments: exception_SPARK-12695.log