Github user dvogelbacher commented on the issue:
https://github.com/apache/spark/pull/20779
I was able to reproduce this just now without changing the value of the constant (i.e., with unmodified code from master). The repro chains the same groupBy/agg stage 71 times; it is written below as a fold to keep the transcript readable:
➜  spark git:(master) ./bin/spark-shell
18/03/09 11:11:02 WARN Utils: Your hostname, dvogelbac resolves to a loopback address: 127.0.0.1; using 10.111.11.111 instead (on interface en0)
18/03/09 11:11:02 WARN Utils: Set SPARK_LOCAL_IP if you need to bind to another address
18/03/09 11:11:02 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://10.111.11.111:4040
Spark context available as 'sc' (master = local[*], app id = local-1520593867110).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.4.0-SNAPSHOT
      /_/

Using Scala version 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_121)
Type in expressions to have them evaluated.
Type :help for more information.
scala> spark.conf.set("spark.sql.shuffle.partitions", 1)
scala> val df_pet_age = Seq((8, "bat"), (15, "mouse"), (5, "horse")).toDF("age", "name")
df_pet_age: org.apache.spark.sql.DataFrame = [age: int, name: string]
scala> (1 to 71).foldLeft(df_pet_age) { (df, _) =>
     |   df.groupBy("name").agg(avg("age").alias("age"))
     | }.limit(1).show()
[Stage 1:>                                                          (0 + 1) / 1]
18/03/09 11:11:21 ERROR Executor: Exception in task 0.0 in stage 1.0 (TID 3)
java.lang.IllegalAccessError: tried to access method org.apache.spark.sql.execution.BufferedRowIterator.shouldStop()Z from class org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2$agg_NestedClass
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2$agg_NestedClass.agg_doAggregateWithKeys2$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2$agg_NestedClass.agg_doAggregateWithKeys1$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2$agg_NestedClass.agg_doAggregateWithKeys$(Unknown Source)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage2.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$11$$anon$1.hasNext(WholeStageCodegenExec.scala:616)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:253)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
    at org.apache.spark.scheduler.Task.run(Task.scala:109)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745)
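
For anyone puzzling over the error itself: BufferedRowIterator.shouldStop() is protected, and the failing call comes from agg_NestedClass, which is nested inside the generated iterator but is not itself a subclass of BufferedRowIterator. The JVM only permits a direct call to a protected method of a class in another package from a subclass of that class. A minimal sketch of that shape (package and class names are hypothetical stand-ins, not Spark's real ones):

// AccessSketch.scala -- illustrates the JVM access rule only; not Spark code.
package execpkg {
  class BufferedIter {
    // protected, like BufferedRowIterator.shouldStop() before this PR
    protected def shouldStop(): Boolean = false
  }
}

package genpkg {
  // plays the role of GeneratedIteratorForCodegenStage2
  class GeneratedIter extends execpkg.BufferedIter {
    // plays the role of agg_NestedClass: nested in the subclass,
    // but NOT itself a subclass of BufferedIter
    class Nested {
      // scalac (like javac) compiles this through a synthetic accessor on
      // GeneratedIter, so this sketch runs fine; Janino instead emits the
      // invocation directly from Nested's bytecode, and the JVM rejects
      // that at link time with java.lang.IllegalAccessError.
      def stop(): Boolean = shouldStop()
    }
  }

  object Demo {
    def main(args: Array[String]): Unit = {
      val gen = new GeneratedIter
      println((new gen.Nested).stop()) // prints: false
    }
  }
}

Making the methods public, as this PR does, sidesteps the rule entirely, since public methods are callable from any class.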