squito commented on a change in pull request #23677: [SPARK-26755][SCHEDULER] : Optimize Spark Scheduler to dequeue speculative tasks…
URL: https://github.com/apache/spark/pull/23677#discussion_r299579217
 
 

 ##########
 File path: core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala
 ##########
 @@ -246,85 +236,86 @@ private[spark] class TaskSetManager(
   /** Add a task to all the pending-task lists that it should be on. */
   private[spark] def addPendingTask(
       index: Int,
-      resolveRacks: Boolean = true): Unit = {
-    for (loc <- tasks(index).preferredLocations) {
-      loc match {
-        case e: ExecutorCacheTaskLocation =>
-          pendingTasksForExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
-        case e: HDFSCacheTaskLocation =>
-          val exe = sched.getExecutorsAliveOnHost(loc.host)
-          exe match {
-            case Some(set) =>
-              for (e <- set) {
-                pendingTasksForExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
-              }
-              logInfo(s"Pending task $index has a cached location at ${e.host} " +
-                ", where there are executors " + set.mkString(","))
-            case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
-                ", but there are no executors alive there.")
-          }
-        case _ =>
+      resolveRacks: Boolean = true,
+      speculative: Boolean = false): Unit = {
+    if (speculative) {
+      for (loc <- tasks(index).preferredLocations) {
+        loc match {
+          case e: ExecutorCacheTaskLocation =>
+            pendingSpeculatableTasks.forExecutor.getOrElseUpdate(
+              e.executorId, new ArrayBuffer) += index
+          case _ =>
+        }
+        pendingSpeculatableTasks.forHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
+        for (rack <- sched.getRackForHost(loc.host)) {
+          pendingSpeculatableTasks.forRack.getOrElseUpdate(rack, new ArrayBuffer) += index
+        }
       }
-      pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
 
-      if (resolveRacks) {
-        sched.getRackForHost(loc.host).foreach { rack =>
-          pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer) += index
-        }
+      if (tasks(index).preferredLocations == Nil) {
+        pendingSpeculatableTasks.noPrefs += index
       }
-    }
 
-    if (tasks(index).preferredLocations == Nil) {
-      pendingTasksWithNoPrefs += index
-    }
+      pendingSpeculatableTasks.anyPrefs += index
+    } else {
 
-    allPendingTasks += index  // No point scanning this whole list to find the old task there
-  }
+      for (loc <- tasks(index).preferredLocations) {
+        loc match {
+          case e: ExecutorCacheTaskLocation =>
+            pendingTasks.forExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
+          case e: HDFSCacheTaskLocation =>
+            val exe = sched.getExecutorsAliveOnHost(loc.host)
+            exe match {
+              case Some(set) =>
+                for (e <- set) {
+                  pendingTasks.forExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
+                }
+                logInfo(s"Pending task $index has a cached location at ${e.host} " +
+                  ", where there are executors " + set.mkString(","))
+              case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
+                ", but there are no executors alive there.")
+            }
+          case _ =>
+        }
+        pendingTasks.forHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
 
-  private[spark] def addPendingSpeculativeTask(index: Int) {
-    for (loc <- tasks(index).preferredLocations) {
-      loc match {
-        case e: ExecutorCacheTaskLocation =>
-          pendingSpeculatableTasksForExecutor.getOrElseUpdate(
-            e.executorId, new HashSet) += index
-        case _ =>
+        if (resolveRacks) {
+          sched.getRackForHost(loc.host).foreach { rack =>
+            pendingTasks.forRack.getOrElseUpdate(rack, new ArrayBuffer) += index
+          }
+        }
       }
-      pendingSpeculatableTasksForHost.getOrElseUpdate(loc.host, new HashSet) += index
-      for (rack <- sched.getRackForHost(loc.host)) {
-        pendingSpeculatableTasksForRack.getOrElseUpdate(rack, new HashSet) += index
+
+      if (tasks(index).preferredLocations == Nil) {
+        pendingTasks.noPrefs += index
       }
-    }
 
-    if (tasks(index).preferredLocations == Nil) {
-        pendingSpeculatableTasksWithNoPrefs += index
+      pendingTasks.anyPrefs += index
     }
-
-    // No point scanning this whole list to find the old task there
-    allPendingSpeculatableTasks += index
   }
 
   /**
    * Return the pending tasks list for a given executor ID, or an empty list if
    * there is no map entry for that host
    */
  private def getPendingTasksForExecutor(executorId: String): ArrayBuffer[Int] = {
-    pendingTasksForExecutor.getOrElse(executorId, ArrayBuffer())
+    pendingTasks.forExecutor.getOrElse(executorId, ArrayBuffer())
 
 Review comment:
   I don't think we even need these private `getPendingTasksForXXX` helpers anymore (especially with some of my other suggested refactoring). If you want to keep them, they should go on `class PendingTasksByLocality`.
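
   For illustration, here is a minimal sketch of what that could look like, assuming `PendingTasksByLocality` owns the `forExecutor`/`forHost`/`forRack` maps shown in the diff; the `tasksForXXX` helper names below are hypothetical, not something this PR defines:

   ```scala
   package org.apache.spark.scheduler

   import scala.collection.mutable.{ArrayBuffer, HashMap}

   private[scheduler] class PendingTasksByLocality {
     // Pending task indices keyed by executor ID, host, and rack,
     // mirroring the forExecutor / forHost / forRack maps in the diff above.
     val forExecutor = new HashMap[String, ArrayBuffer[Int]]
     val forHost = new HashMap[String, ArrayBuffer[Int]]
     val forRack = new HashMap[String, ArrayBuffer[Int]]
     val noPrefs = new ArrayBuffer[Int]
     val anyPrefs = new ArrayBuffer[Int]

     // Hypothetical lookup helpers replacing the private getPendingTasksForXXX
     // methods on TaskSetManager; each returns an empty buffer when there is
     // no entry for the given key.
     def tasksForExecutor(executorId: String): ArrayBuffer[Int] =
       forExecutor.getOrElse(executorId, ArrayBuffer.empty)

     def tasksForHost(host: String): ArrayBuffer[Int] =
       forHost.getOrElse(host, ArrayBuffer.empty)

     def tasksForRack(rack: String): ArrayBuffer[Int] =
       forRack.getOrElse(rack, ArrayBuffer.empty)
   }
   ```

   That way `TaskSetManager` could call e.g. `pendingTasks.tasksForExecutor(execId)` and `pendingSpeculatableTasks.tasksForExecutor(execId)` without duplicating the lookup logic for the regular and speculative queues.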
