GitHub user squito commented on a diff in the pull request:

    https://github.com/apache/spark/pull/14079#discussion_r79916404
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala ---
    @@ -592,34 +610,59 @@ private[spark] class TaskSetManager(
        * failures (this is because the method picks one unscheduled task, and then iterates through each
        * executor until it finds one that the task hasn't failed on already).
        */
    -  private[scheduler] def abortIfCompletelyBlacklisted(executors: Iterable[String]): Unit = {
    -
    -    val pendingTask: Option[Int] = {
    -      // usually this will just take the last pending task, but because of the lazy removal
    -      // from each list, we may need to go deeper in the list.  We poll from the end because
    -      // failed tasks are put back at the end of allPendingTasks, so we're more likely to find
    -      // an unschedulable task this way.
    -      val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
    -        copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
    -      }
    -      if (indexOffset == -1) {
    -        None
    -      } else {
    -        Some(allPendingTasks(indexOffset))
    -      }
    -    }
    +  private[scheduler] def abortIfCompletelyBlacklisted(
    +      hostToExecutors: HashMap[String, HashSet[String]]): Unit = {
    +    blacklistTracker.foreach { blacklist =>
    +      // because this is called in a loop, with multiple resource offers and locality levels,
    +      // we could end up aborting this taskset multiple times without the !isZombie check
    +      if (!isZombie) {
    +        // take any task that needs to be scheduled, and see if we can find some executor it *could*
    +        // run on
    +        val pendingTask: Option[Int] = {
    +          // usually this will just take the last pending task, but because of the lazy removal
    +          // from each list, we may need to go deeper in the list.  We poll from the end because
    +          // failed tasks are put back at the end of allPendingTasks, so we're more likely to find
    +          // an unschedulable task this way.
    +          val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
    +            copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
    +          }
    +          if (indexOffset == -1) {
    +            None
    +          } else {
    +            Some(allPendingTasks(indexOffset))
    +          }
    +        }
     
    -    // If no executors have registered yet, don't abort the stage, just wait.  We probably
    -    // got here because a task set was added before the executors registered.
    -    if (executors.nonEmpty) {
    -      // take any task that needs to be scheduled, and see if we can find some executor it *could*
    -      // run on
    -      pendingTask.foreach { taskId =>
    -        if (executors.forall(executorIsBlacklisted(_, taskId))) {
    -          val execs = executors.toIndexedSeq.sorted.mkString("(", ",", ")")
    -          val partition = tasks(taskId).partitionId
    -          abort(s"Aborting ${taskSet} because task $taskId (partition $partition)" +
    -            s" has already failed on executors $execs, and no other executors are available.")
    +        // If no executors have registered yet, don't abort the stage, just wait.  We probably
    +        // got here because a task set was added before the executors registered.
    +        if (hostToExecutors.nonEmpty) {
    --- End diff --
    
    good point
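
    For anyone reading along, here is a minimal, self-contained sketch of the scan this hunk preserves. Only `allPendingTasks`, `copiesRunning`, `successful`, and the `lastIndexWhere` poll come from the diff; the `failedExecutors` map and the printed message are hypothetical stand-ins for the real blacklist bookkeeping and the `abort(...)` call in `TaskSetManager`:

        import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}

        object BlacklistScanSketch {
          // Task indices still waiting to run; task 1 failed once, so it was
          // re-appended at the end (lazy removal leaves the stale entry behind).
          val allPendingTasks = ArrayBuffer(0, 1, 2, 1)
          val copiesRunning = Array(1, 0, 0, 0)               // running copies per task index
          val successful = Array(false, false, true, false)   // completed task indices
          // Hypothetical stand-in: executors each task has already failed on.
          val failedExecutors = HashMap(1 -> HashSet("exec1", "exec2"))

          def main(args: Array[String]): Unit = {
            // Poll from the end: failed tasks are re-appended there, so an
            // unschedulable task is more likely to turn up quickly.
            val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
              copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
            }
            val pendingTask = if (indexOffset == -1) None else Some(allPendingTasks(indexOffset))

            // If every offered executor has already seen this task fail, the real
            // code aborts the task set; here we just report it.
            val offeredExecutors = Seq("exec1", "exec2")
            pendingTask.foreach { taskIndex =>
              val badExecs = failedExecutors.getOrElse(taskIndex, HashSet.empty[String])
              if (offeredExecutors.forall(badExecs.contains)) {
                println(s"task $taskIndex has already failed on all of $offeredExecutors")
              }
            }
          }
        }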

