Github user ilganeli commented on a diff in the pull request:
https://github.com/apache/spark/pull/5636#discussion_r30068215
--- Diff: core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala ---
@@ -475,7 +475,148 @@ class DAGSchedulerSuite
     assert(results === Map(0 -> 42, 1 -> 43))
     assertDataStructuresEmpty()
   }
+
+  test("Multiple consecutive stage failures should lead to stage being aborted.") {
+    // Create a new Listener to confirm that the listenerBus sees the JobEnd message
+    // when we abort the stage. This message will also be consumed by the EventLoggingListener
+    // so this will propagate up to the user.
+    var ended = false
+    var jobResult : JobResult = null
+    class EndListener extends SparkListener {
+      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
+        jobResult = jobEnd.jobResult
+        ended = true
+      }
+    }
+
+    sc.listenerBus.addListener(new EndListener())
+
+    val shuffleMapRdd = new MyRDD(sc, 2, Nil)
+    val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
+    val shuffleId = shuffleDep.shuffleId
+    val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
+    submit(reduceRdd, Array(0, 1))
+
+    complete(taskSets(0), Seq(
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostB", 1))))
+
+    for (x <- 1 to Stage.MAX_STAGE_FAILURES) {
+      // the 2nd ResultTask failed
+      complete(taskSets(1), Seq(
+        (Success, 42),
+        (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
+
+      scheduler.resubmitFailedStages()
+      if (x < Stage.MAX_STAGE_FAILURES) {
+        assert(scheduler.runningStages.nonEmpty)
+        assert(!ended)
+      } else {
+        // Stage has been aborted and removed from running stages
+        assertDataStructuresEmpty()
+        sc.listenerBus.waitUntilEmpty(1000)
+        assert(ended)
+        assert(jobResult.isInstanceOf[JobFailed])
+      }
+    }
+  }
+
+
+  test("Multiple consecutive Fetch failures in a stage triggers an abort.") {
+    // Create a new Listener to confirm that the listenerBus sees the JobEnd message
+    // when we abort the stage. This message will also be consumed by the EventLoggingListener
+    // so this will propagate up to the user.
+    var ended = false
+    var jobResult : JobResult = null
+    class EndListener extends SparkListener {
+      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
+        jobResult = jobEnd.jobResult
+        ended = true
+      }
+    }
+
+    sc.listenerBus.addListener(new EndListener())
+
+    val shuffleMapRdd = new MyRDD(sc, 8, Nil)
+    val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
+    val shuffleId = shuffleDep.shuffleId
+    val reduceRdd = new MyRDD(sc, 8, List(shuffleDep))
+    submit(reduceRdd, Array(0, 1, 2, 3, 4, 5, 6, 7))
+
+    complete(taskSets(0), Seq(
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostB", 1))))
+
+    complete(taskSets(1), Seq(
+      (Success, 42),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored1"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored2"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored3"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored4"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored5"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored6"), null)))
+
+    scheduler.resubmitFailedStages()
+    assertDataStructuresEmpty()
+    sc.listenerBus.waitUntilEmpty(1000)
+    assert(ended)
+    assert(jobResult.isInstanceOf[JobFailed])
+  }
+
+  test("Multiple consecutive task failures (not FetchFailures) in a stage should not " +
+    "trigger an abort.") {
+    // Create a new Listener to confirm that the listenerBus sees the JobEnd message
+    // when we abort the stage. This message will also be consumed by the EventLoggingListener
+    // so this will propagate up to the user.
+    var ended = false
+    var jobResult : JobResult = null
+    class EndListener extends SparkListener {
+      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
+        jobResult = jobEnd.jobResult
+        ended = true
+      }
+    }
+
+    sc.listenerBus.addListener(new EndListener())
+
+    val shuffleMapRdd = new MyRDD(sc, 8, Nil)
+    val shuffleDep = new ShuffleDependency(shuffleMapRdd, null)
+    val shuffleId = shuffleDep.shuffleId
+    val reduceRdd = new MyRDD(sc, 8, List(shuffleDep))
+    submit(reduceRdd, Array(0, 1, 2, 3, 4, 5, 6, 7))
+    complete(taskSets(0), Seq(
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostA", 1)),
+      (Success, makeMapStatus("hostB", 1))))
+
+    complete(taskSets(1), Seq(
+      (Success, 42),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null),
+      (FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored1"), null),
+      (ExceptionFailure("fakeExcept", "failA", null, "This is a stack.", None), null),
+      (ExceptionFailure("fakeExcept", "failB", null, "This is a stack.", None), null),
+      (ExceptionFailure("fakeExcept", "failC", null, "This is a stack.", None), null),
+      (ExceptionFailure("fakeExcept", "failD", null, "This is a stack.", None), null),
+      (Success, 43)))
+
+    scheduler.resubmitFailedStages()
+    assert(scheduler.runningStages.nonEmpty)
+    assert(!ended)
--- End diff --
Thanks for the comments @squito. I'll pick this up once the other patch is
merged.
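
For anyone following the thread, the `EndListener` in each of the new tests is just a plain `SparkListener` that records the `SparkListenerJobEnd` result so the test can later assert on `JobFailed`. Below is a minimal standalone sketch of the same pattern, purely for illustration and not part of the patch: it assumes a local `SparkContext` and registers the listener through the public `sc.addSparkListener` rather than the internal `listenerBus` that the suite pokes at directly.

```scala
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.scheduler.{JobFailed, JobResult, SparkListener, SparkListenerJobEnd}

// Hypothetical demo object, not from the patch.
object JobEndWatcher {
  // Same idea as EndListener in the tests: remember the result of the last finished job.
  @volatile private var lastResult: Option[JobResult] = None

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(
      new SparkConf().setMaster("local[2]").setAppName("job-end-watcher"))

    sc.addSparkListener(new SparkListener {
      override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
        lastResult = Some(jobEnd.jobResult)
      }
    })

    sc.parallelize(1 to 100).count()  // run any job

    // Listener events are posted asynchronously; the suite waits with
    // sc.listenerBus.waitUntilEmpty(1000), a crude sleep is enough for this demo.
    Thread.sleep(1000)
    println(s"last job failed? ${lastResult.exists(_.isInstanceOf[JobFailed])}")
    sc.stop()
  }
}
```

The tests rely on the same asynchronous delivery, which is why they only assert on `ended` and `jobResult` after draining the bus with `sc.listenerBus.waitUntilEmpty(1000)`.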