Github user markhamstra commented on a diff in the pull request:

    https://github.com/apache/spark/pull/3779#discussion_r24049293
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala ---
    @@ -506,13 +506,59 @@ private[spark] class TaskSetManager(
        * Get the level we can launch tasks according to delay scheduling, based on current wait time.
        */
       private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
    -    while (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex) &&
    -        currentLocalityIndex < myLocalityLevels.length - 1)
    -    {
    -      // Jump to the next locality level, and remove our waiting time for the current one since
    -      // we don't want to count it again on the next one
    -      lastLaunchTime += localityWaits(currentLocalityIndex)
    -      currentLocalityIndex += 1
    +    // Remove the scheduled or finished tasks lazily
    +    def hasNotScheduledTasks(taskIndexes: ArrayBuffer[Int]): Boolean = {
    +      var indexOffset = taskIndexes.size
    +      while (indexOffset > 0) {
    +        indexOffset -= 1
    +        val index = taskIndexes(indexOffset)
    +        if (copiesRunning(index) == 0 && !successful(index)) {
    +          return true
    +        } else {
    +          taskIndexes.remove(indexOffset)
    +        }
    +      }
    +      false
    +    }
    +    // It removes the empty lists after the check
    +    def hasMoreTasks(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
    +      val emptyKeys = new ArrayBuffer[String]
    +      val hasTasks = pendingTasks.exists {
    +        case (id: String, tasks: ArrayBuffer[Int]) =>
    +          if (hasNotScheduledTasks(tasks)) {
    +            true
    +          } else {
    +            emptyKeys += id
    +            false
    +          }
    +      }
    +      emptyKeys.foreach(x => pendingTasks.remove(x))
    +      hasTasks
    +    }
    +
    +    while (currentLocalityIndex < myLocalityLevels.length - 1) {
    +      val moreTasks = myLocalityLevels(currentLocalityIndex) match {
    +        case TaskLocality.PROCESS_LOCAL => hasMoreTasks(pendingTasksForExecutor)
    +        case TaskLocality.NODE_LOCAL => hasMoreTasks(pendingTasksForHost)
    +        case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.nonEmpty
    +        case TaskLocality.RACK_LOCAL => hasMoreTasks(pendingTasksForRack)
    +      }
    +      if (!moreTasks) {
    +        // Move to next locality level if there is no task for current level
    +        lastLaunchTime = curTime
    +        logDebug(s"No tasks for locality level 
${myLocalityLevels(currentLocalityIndex)} " +
    +          s"move to ${myLocalityLevels(currentLocalityIndex + 1)}")
    +        currentLocalityIndex += 1
    +      } else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
    +        // Jump to the next locality level, and remove our waiting time for the current one since
    +        // we don't want to count it again on the next one
    +        lastLaunchTime += localityWaits(currentLocalityIndex)
    +        currentLocalityIndex += 1
    +        logDebug(s"Move to ${myLocalityLevels(currentLocalityIndex)} after 
wait for " +
    +          s"${localityWaits(currentLocalityIndex)} ms")
    +      } else {
    +        return myLocalityLevels(currentLocalityIndex)
    +      }
    --- End diff --
    
    This looks correct to me, but I'm not finding the naming helpful -- in 
particular, the excess of "index" in `hasNotScheduledTasks`.  I'd suggest 
something more like this to improve readability (although I'm being a little 
indecisive about "run" vs. "schedule"):
    ```scala
        def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
          var idOffset = pendingTaskIds.size
          while (idOffset > 0) {
            idOffset -= 1
            val index = pendingTaskIds(idOffset)
            if (copiesRunning(index) == 0 && !successful(index)) {
              return true
            } else {
              pendingTaskIds.remove(idOffset)
            }
          }
          false
        }
        // It removes the empty lists after the check
        def noMoreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
          val emptyKeys = new ArrayBuffer[String]
          val moreTasksToRun = pendingTasks.exists {
            case (id: String, pendingTaskIds: ArrayBuffer[Int]) =>
              if (tasksNeedToBeScheduledFrom(pendingTaskIds)) {
                true
              } else {
                emptyKeys += id
                false
              }
          }
          emptyKeys.foreach(x => pendingTasks.remove(x))
          !moreTasksToRun
        }
    
        while (currentLocalityIndex < myLocalityLevels.length - 1) {
          val noMoreTasksToRun = myLocalityLevels(currentLocalityIndex) match {
            case TaskLocality.PROCESS_LOCAL => noMoreTasksToRunIn(pendingTasksForExecutor)
            case TaskLocality.NODE_LOCAL => noMoreTasksToRunIn(pendingTasksForHost)
            case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.isEmpty
            case TaskLocality.RACK_LOCAL => noMoreTasksToRunIn(pendingTasksForRack)
          }
          if (noMoreTasksToRun) {
            // ...
    ```
    `getAllowedLocalityLevel` also doesn't give adequate notice of the side effect that removes entries from the `pendingTasks` map, but I haven't got a good solution for that -- other than hoisting and intensifying the comment before `hasMoreTasks`.
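    To make the hoisting concrete, here is roughly the kind of warning comment I have in mind, attached to a standalone sketch. The object name and the `copiesRunning`/`successful` bookkeeping are faked so the snippet compiles on its own, and I've let it prune each list fully rather than short-circuit like the real loop -- so this is not a drop-in patch for `TaskSetManager`, just an illustration of the comment:
    ```scala
    import scala.collection.mutable.{ArrayBuffer, HashMap}

    // Standalone sketch only: the task bookkeeping is faked with plain buffers.
    object PendingTaskPruningSketch {
      val copiesRunning = ArrayBuffer(1, 0)     // task 0 already has a running copy
      val successful = ArrayBuffer(false, true) // task 1 already finished

      // SIDE EFFECT WARNING: this helper mutates `pendingTasks` in place. Task ids
      // that are already running or finished are dropped from each list, and keys
      // whose lists become empty are removed from the map. Callers must therefore
      // pass the real pending-task maps, never defensive copies.
      def hasMoreTasks(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
        val emptyKeys = new ArrayBuffer[String]
        val moreTasks = pendingTasks.exists { case (id, taskIds) =>
          // Prune from the end so removals don't disturb the remaining indices
          taskIds.indices.reverse.foreach { i =>
            val t = taskIds(i)
            if (copiesRunning(t) > 0 || successful(t)) taskIds.remove(i)
          }
          if (taskIds.nonEmpty) true else { emptyKeys += id; false }
        }
        emptyKeys.foreach(pendingTasks.remove)
        moreTasks
      }

      def main(args: Array[String]): Unit = {
        val pending = HashMap("execA" -> ArrayBuffer(0, 1))
        println(hasMoreTasks(pending)) // false -- nothing left to schedule
        println(pending)               // empty: execA's entry was pruned as a side effect
      }
    }
    ```
    Running the `main` shows the side effect directly: the map passed in comes back with its exhausted entry removed, which is exactly what the hoisted comment needs to shout about.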

