dongjoon-hyun commented on code in PR #37924:
URL: https://github.com/apache/spark/pull/37924#discussion_r973809002
##########
core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala:
##########
@@ -1140,6 +1146,38 @@ class DAGSchedulerSuite extends SparkFunSuite with TempLocalSparkContext with Ti
assertDataStructuresEmpty()
}
+ test("SPARK-40481: Multiple consecutive stage fetch failures should not fail job " +
+ "when ignoreOnDecommissionFetchFailure is enabled.") {
+ conf.set(config.STAGE_IGNORE_DECOMMISSION_FETCH_FAILURE.key, "true")
+
+ setupStageAbortTest(sc)
+ val parts = 2
+ val shuffleMapRdd = new MyRDD(sc, parts, Nil)
+ val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
+ val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
+ submit(reduceRdd, (0 until parts).toArray)
+
+ for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
+ // Complete all the tasks for the current attempt of stage 0 successfully
+ completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = parts,
+ Seq("hostA", "hostB"))
+
+ taskScheduler.executorDecommission("hostA-exec", ExecutorDecommissionInfo(""))
+ // Now we should have a new taskSet, for a new attempt of stage 1.
+ // Fail all these tasks with FetchFailure
+ completeNextStageWithFetchFailure(1, attempt, shuffleDep)
+
+ // this will trigger a resubmission of stage 0, since we've lost some of its
+ // map output, for the next iteration through the loop
+ scheduler.resubmitFailedStages()
+ }
+
+ // Confirm job finished successfully
+ sc.listenerBus.waitUntilEmpty()
+ assert(scheduler.runningStages.nonEmpty)
+ assert(!ended)
+
Review Comment:
Shall we clean up this redundant empty line?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]