tgravescs commented on a change in pull request #27223: 
[SPARK-30511][SPARK-28403][CORE] Don't treat failed/killed speculative tasks as 
pending in Spark scheduler
URL: https://github.com/apache/spark/pull/27223#discussion_r370395846
 
 

 ##########
 File path: core/src/test/scala/org/apache/spark/ExecutorAllocationManagerSuite.scala
 ##########
 @@ -263,6 +263,99 @@ class ExecutorAllocationManagerSuite extends SparkFunSuite
     assert(numExecutorsToAdd(manager) === 1)
   }
 
+  test("SPARK-30511 remove executors when speculative tasks end") {
+    val manager = createManager(createConf(0, 10, 0).set(config.EXECUTOR_CORES, 4))
+
+    post(SparkListenerStageSubmitted(createStageInfo(0, 40)))
+    assert(addExecutors(manager) === 1)
+    assert(addExecutors(manager) === 2)
+    assert(addExecutors(manager) === 4)
+    assert(addExecutors(manager) === 3)
+
+    (0 to 9).foreach(execId => onExecutorAdded(manager, execId.toString))
+    (0 to 39).map { i => createTaskInfo(i, i, executorId = s"${i / 4}")}.foreach {
+      info => post(SparkListenerTaskStart(0, 0, info))
+    }
+    assert(numExecutorsTarget(manager) === 10)
+    assert(maxNumExecutorsNeeded(manager) == 10)
+
+    // 30 tasks (0 - 29) finished
+    (0 to 29).map { i => createTaskInfo(i, i, executorId = s"${i / 4}")}.foreach {
+      info => post(SparkListenerTaskEnd(0, 0, null, Success, info, new ExecutorMetrics, null)) }
+    adjustRequestedExecutors(manager)
+    assert(numExecutorsTarget(manager) === 3)
+    assert(maxNumExecutorsNeeded(manager) == 3)
+    (0 to 6).foreach { i => assert(removeExecutor(manager, i.toString))}
+    (0 to 6).foreach { i => onExecutorRemoved(manager, i.toString)}
+
+    // 10 speculative tasks (30 - 39) launch for the remaining tasks
+    (30 to 39).foreach { _ => post(SparkListenerSpeculativeTaskSubmitted(0))}
+    adjustRequestedExecutors(manager)
+    assert(addExecutors(manager) === 1)
+    assert(addExecutors(manager) === 2)
+    assert(numExecutorsTarget(manager) == 6)
+    assert(maxNumExecutorsNeeded(manager) == 6)
+    (10 to 12).foreach(execId => onExecutorAdded(manager, execId.toString))
+    (40 to 49).map { i =>
+      createTaskInfo(taskId = i, taskIndex = i - 10, executorId = s"${i / 4}", speculative = true)}
+      .foreach { info => post(SparkListenerTaskStart(0, 0, info))}
+    adjustRequestedExecutors(manager)
+    assert(numExecutorsTarget(manager) == 5) // At this point, we still have 6 executors running
 
 Review comment:
   Wait, why did this number change to 5? None of the tasks finished between this check and the previous one.
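   
   For reference, here is one way ceiling-division sizing could produce both 6 and 5 from the same 20 tasks, depending on whether pending speculative copies are counted separately from running tasks. This is purely a sketch under those assumptions; `SpeculativeSizingSketch` and `ceilDiv` are made-up names, not code from this PR:
   
       // Hypothetical sketch, not the PR's actual code. With EXECUTOR_CORES = 4
       // and 1 CPU per task, each executor provides 4 task slots.
       object SpeculativeSizingSketch {
         private def ceilDiv(tasks: Int, slots: Int): Int = (tasks + slots - 1) / slots
   
         def main(args: Array[String]): Unit = {
           val tasksPerExecutor = 4
   
           // Before the speculative copies start: 10 originals running, 10 copies pending.
           val pooled = ceilDiv(10 + 10, tasksPerExecutor)        // 5
           val separate = ceilDiv(10, tasksPerExecutor) +
             ceilDiv(10, tasksPerExecutor)                        // 3 + 3 = 6
   
           // After the copies start, all 20 tasks are simply running.
           val afterStart = ceilDiv(20, tasksPerExecutor)         // 5
   
           println(s"pooled=$pooled separate=$separate afterStart=$afterStart")
         }
       }
   
   If pending speculative copies were sized separately (on the theory that a copy should not share an executor with its original) but pooled with everything else once running, the expected count would drop from 6 to 5 without any task finishing, which may be the discrepancy this comment is probing.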
