Github user mateiz commented on a diff in the pull request:
https://github.com/apache/spark/pull/1313#discussion_r15830100
--- Diff:
core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala ---
@@ -363,25 +378,27 @@ private[spark] class TaskSetManager(
}
}
- // Look for no-pref tasks after rack-local tasks since they can run
anywhere.
- for (index <- findTaskFromList(execId, pendingTasksWithNoPrefs)) {
- return Some((index, TaskLocality.PROCESS_LOCAL, false))
- }
-
- if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
+ if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
for (index <- findTaskFromList(execId, allPendingTasks)) {
return Some((index, TaskLocality.ANY, false))
}
}
- // Finally, if all else has failed, find a speculative task
- findSpeculativeTask(execId, host, locality).map { case (taskIndex,
allowedLocality) =>
- (taskIndex, allowedLocality, true)
- }
+ // find a speculative task if all other tasks have been scheduled
+ findSpeculativeTask(execId, host, maxLocality).map {
+ case (taskIndex, allowedLocality) => (taskIndex, allowedLocality,
true)}
}
/**
* Respond to an offer of a single executor from the scheduler by
finding a task
+ *
+ * NOTE: this function is either called with a real preferredLocality
level which
--- End diff --
It's actually maxLocality, not preferredLocality
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]