GitHub user kayousterhout commented on a diff in the pull request:
https://github.com/apache/spark/pull/4708#discussion_r26618641
--- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala ---
@@ -1003,50 +1026,52 @@ class DAGScheduler(
             if (failedEpoch.contains(execId) && smt.epoch <= failedEpoch(execId)) {
               logInfo("Ignoring possibly bogus ShuffleMapTask completion from " + execId)
             } else {
-              stage.addOutputLoc(smt.partitionId, status)
+              shuffleStage.addOutputLoc(smt.partitionId, status)
             }
-            if (runningStages.contains(stage) && stage.pendingTasks.isEmpty) {
-              markStageAsFinished(stage)
+            if (runningStages.contains(shuffleStage) && shuffleStage.pendingTasks.isEmpty) {
+              markStageAsFinished(shuffleStage)
               logInfo("looking for newly runnable stages")
               logInfo("running: " + runningStages)
               logInfo("waiting: " + waitingStages)
               logInfo("failed: " + failedStages)
-              if (stage.shuffleDep.isDefined) {
-                // We supply true to increment the epoch number here in case this is a
-                // recomputation of the map outputs. In that case, some nodes may have cached
-                // locations with holes (from when we detected the error) and will need the
-                // epoch incremented to refetch them.
-                // TODO: Only increment the epoch number if this is not the first time
-                // we registered these map outputs.
-                mapOutputTracker.registerMapOutputs(
-                  stage.shuffleDep.get.shuffleId,
-                  stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray,
-                  changeEpoch = true)
-              }
+
+              // We supply true to increment the epoch number here in case this is a
+              // recomputation of the map outputs. In that case, some nodes may have cached
+              // locations with holes (from when we detected the error) and will need the
+              // epoch incremented to refetch them.
+              // TODO: Only increment the epoch number if this is not the first time
+              // we registered these map outputs.
+              mapOutputTracker.registerMapOutputs(
+                shuffleStage.shuffleDep.shuffleId,
+                shuffleStage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray,
+                changeEpoch = true)
+
               clearCacheLocs()
-              if (stage.outputLocs.exists(_ == Nil)) {
-                // Some tasks had failed; let's resubmit this stage
+              if (shuffleStage.outputLocs.contains(Nil)) {
+                // Some tasks had failed; let's resubmit this shuffleStage
                 // TODO: Lower-level scheduler should also deal with this
-                logInfo("Resubmitting " + stage + " (" + stage.name +
+                logInfo("Resubmitting " + shuffleStage + " (" + shuffleStage.name +
                   ") because some of its tasks had failed: " +
-                  stage.outputLocs.zipWithIndex.filter(_._1 == Nil).map(_._2).mkString(", "))
-                submitStage(stage)
+                  shuffleStage.outputLocs.zipWithIndex.filter(_._1 == Nil).map(_._2).mkString(", "))
+                submitStage(shuffleStage)
               } else {
                 val newlyRunnable = new ArrayBuffer[Stage]
-                for (stage <- waitingStages) {
-                  logInfo("Missing parents for " + stage + ": " + getMissingParentStages(stage))
+                for (shuffleStage <- waitingStages) {
+                  logInfo("Missing parents for " + shuffleStage + ": " +
+                    getMissingParentStages(shuffleStage))
                 }
-                for (stage <- waitingStages if getMissingParentStages(stage) == Nil) {
-                  newlyRunnable += stage
+                for (shuffleStage <- waitingStages if getMissingParentStages(shuffleStage) == Nil) {
+                  newlyRunnable += shuffleStage
                 }
                 waitingStages --= newlyRunnable
                 runningStages ++= newlyRunnable
                 for {
-                  stage <- newlyRunnable.sortBy(_.id)
-                  jobId <- activeJobForStage(stage)
+                  shuffleStage <- newlyRunnable.sortBy(_.id)
+                  jobId <- activeJobForStage(shuffleStage)
                 } {
-                  logInfo("Submitting " + stage + " (" + stage.rdd + "), which is now runnable")
-                  submitMissingTasks(stage, jobId)
+                  logInfo("Submitting " + shuffleStage + " (" +
+                    shuffleStage.rdd + "), which is now runnable")
--- End diff --
Nit: fix the indentation of this wrapped line.
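
For illustration only, one way the wrapped call could be laid out; this is a sketch assuming a two-space continuation indent, not the authoritative fix:

    // Hypothetical re-wrapping of the flagged lines (illustrative only):
    logInfo("Submitting " + shuffleStage + " (" + shuffleStage.rdd +
      "), which is now runnable")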