attilapiros commented on a change in pull request #28848:
URL: https://github.com/apache/spark/pull/28848#discussion_r441647042
##########
File path: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
##########
@@ -1939,24 +1941,22 @@ private[spark] class DAGScheduler(
hostToUnregisterOutputs: Option[String],
maybeEpoch: Option[Long] = None): Unit = {
val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch)
+ logDebug(s"Removing executor $execId, fileLost: $fileLost, currentEpoch:
$currentEpoch")
if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) {
failedEpoch(execId) = currentEpoch
logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch))
blockManagerMaster.removeExecutor(execId)
- if (fileLost) {
- hostToUnregisterOutputs match {
- case Some(host) =>
- logInfo("Shuffle files lost for host: %s (epoch %d)".format(host,
currentEpoch))
- mapOutputTracker.removeOutputsOnHost(host)
- case None =>
- logInfo("Shuffle files lost for executor: %s (epoch
%d)".format(execId, currentEpoch))
- mapOutputTracker.removeOutputsOnExecutor(execId)
- }
- clearCacheLocs()
-
- } else {
- logDebug("Additional executor lost message for %s (epoch
%d)".format(execId, currentEpoch))
+ }
+ if (fileLost && (!fileLostEpoch.contains(execId) || fileLostEpoch(execId)
< currentEpoch)) {
Review comment:
No, as we would like to avoid calling `clearCacheLocs()` repeatedly for each fetch failure occurring within this stage. The fetch failures all come from this one map-side executor, but several reducer-side executors could be requesting blocks from it, and each of them would generate a call here.
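
To illustrate the point, here is a minimal standalone sketch of the epoch-guard pattern, not the actual `DAGScheduler` code: `FileLostTracker`, `onFilesLost`, and `Demo` are hypothetical names, while `fileLostEpoch` and the cleanup steps mirror what the diff above introduces. The guard makes the expensive cleanup (`removeOutputsOnExecutor`/`removeOutputsOnHost` plus `clearCacheLocs()`) run only once per executor and epoch, no matter how many reducers report failures.

```scala
import scala.collection.mutable

// Hypothetical sketch of the fileLostEpoch guard; names are made up for illustration.
object FileLostTracker {
  private val fileLostEpoch = mutable.HashMap[String, Long]()

  /** Returns true only the first time shuffle files are reported lost for this
   *  executor at (or after) the given epoch, so the caller performs the
   *  expensive cleanup (unregistering map outputs, clearCacheLocs) exactly once. */
  def onFilesLost(execId: String, currentEpoch: Long): Boolean = {
    if (!fileLostEpoch.contains(execId) || fileLostEpoch(execId) < currentEpoch) {
      fileLostEpoch(execId) = currentEpoch
      true   // first report for this epoch: do the cleanup
    } else {
      false  // a later fetch failure from another reducer: already handled
    }
  }
}

// Many reducers may report fetch failures against the same map-side executor
// within one stage; only the first report triggers the cleanup.
object Demo extends App {
  println(FileLostTracker.onFilesLost("exec-1", currentEpoch = 7))  // true
  println(FileLostTracker.onFilesLost("exec-1", currentEpoch = 7))  // false (duplicate)
  println(FileLostTracker.onFilesLost("exec-1", currentEpoch = 8))  // true (newer epoch)
}
```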