[ https://issues.apache.org/jira/browse/SPARK-30853?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Yuming Wang updated SPARK-30853:
--------------------------------
Attachment: heap dump.png
> Error communicating with MapOutputTracker issue
> -----------------------------------------------
>
> Key: SPARK-30853
> URL: https://issues.apache.org/jira/browse/SPARK-30853
> Project: Spark
> Issue Type: Improvement
> Components: Spark Core
> Affects Versions: 2.3.4
> Reporter: Yuming Wang
> Priority: Major
> Attachments: heap dump.png
>
>
> {noformat}
> org.apache.spark.SparkException: Error communicating with MapOutputTracker
> at org.apache.spark.MapOutputTracker.askTracker(MapOutputTracker.scala:277)
> at org.apache.spark.MapOutputTrackerWorker.getStatuses(MapOutputTracker.scala:894)
> at org.apache.spark.MapOutputTrackerWorker.getMapSizesByExecutorId(MapOutputTracker.scala:825)
> at org.apache.spark.shuffle.BlockStoreShuffleReader.read(BlockStoreShuffleReader.scala:56)
> at org.apache.spark.sql.execution.ShuffledRowRDD.compute(ShuffledRowRDD.scala:174)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:325)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:289)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:59)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:325)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:289)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:59)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:325)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:289)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:59)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:325)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:289)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:151)
> at org.apache.spark.scheduler.Task.run(Task.scala:120)
> at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:408)
> at org.apache.spark.executor.Executor$TaskRunner$$anon$3.run(Executor.scala:341)
> at java.security.AccessController.doPrivileged(Native Method)
> at javax.security.auth.Subject.doAs(Subject.java:422)
> at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1869)
> at org.apache.spark.executor.Executor$TaskRunner.withinTaskUGI(Executor.scala:340)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> Caused by: org.apache.spark.rpc.RpcTimeoutException: Futures timed out after [300 seconds]. This timeout is controlled by spark.network.timeout
> at org.apache.spark.rpc.RpcTimeout.org$apache$spark$rpc$RpcTimeout$$createRpcTimeoutException(RpcTimeout.scala:47)
> at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:62)
> at org.apache.spark.rpc.RpcTimeout$$anonfun$addMessageIfTimeout$1.applyOrElse(RpcTimeout.scala:58)
> at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
> at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:76)
> at org.apache.spark.rpc.RpcEndpointRef.askSync(RpcEndpointRef.scala:92)
> at org.apache.spark.rpc.RpcEndpointRef.askSync(RpcEndpointRef.scala:76)
> at org.apache.spark.MapOutputTracker.askTracker(MapOutputTracker.scala:273)
> ... 27 more
> Caused by: java.util.concurrent.TimeoutException: Futures timed out after [300 seconds]
> at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:219)
> at scala.concurrent.impl.Promise$DefaultPromise.result(Promise.scala:223)
> at org.apache.spark.util.ThreadUtils$.awaitResult(ThreadUtils.scala:217)
> at org.apache.spark.rpc.RpcTimeout.awaitResult(RpcTimeout.scala:75)
> ... 30 more
> {noformat}
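>
> Not a fix proposed in this ticket, but for reference: the RpcTimeoutException in the trace names spark.network.timeout as the governing setting, so a minimal sketch of the usual mitigation is to raise that value (and spark.rpc.askTimeout, the more specific key honored by askSync calls such as MapOutputTracker.askTracker). The 800s figures below are illustrative assumptions, not recommendations:
> {code:scala}
> import org.apache.spark.sql.SparkSession
>
> // Sketch only: illustrative values, assuming the tracker is merely slow
> // (e.g. serving large map-status payloads) rather than deadlocked.
> val spark = SparkSession.builder()
>   .appName("map-output-tracker-timeout-sketch")
>   // Default for most network interactions is 120s; the trace shows 300s in effect.
>   .config("spark.network.timeout", "800s")
>   // askSync calls (like MapOutputTracker.askTracker) honor this key,
>   // which falls back to spark.network.timeout when unset.
>   .config("spark.rpc.askTimeout", "800s")
>   .getOrCreate()
> {code}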