Github user mateiz commented on a diff in the pull request:
https://github.com/apache/spark/pull/1313#discussion_r15508323
--- Diff:
core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala ---
@@ -363,38 +379,44 @@ private[spark] class TaskSetManager(
}
}
- // Look for no-pref tasks after rack-local tasks since they can run
anywhere.
- for (index <- findTaskFromList(execId, pendingTasksWithNoPrefs)) {
- return Some((index, TaskLocality.PROCESS_LOCAL, false))
- }
-
- if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
+ if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
for (index <- findTaskFromList(execId, allPendingTasks)) {
return Some((index, TaskLocality.ANY, false))
}
}
- // Finally, if all else has failed, find a speculative task
- findSpeculativeTask(execId, host, locality).map { case (taskIndex,
allowedLocality) =>
- (taskIndex, allowedLocality, true)
- }
+ None
}
/**
* Respond to an offer of a single executor from the scheduler by
finding a task
+ *
+ * NOTE: this function is either called with a real preferredLocality
level, which
+ * is adjusted by the delay scheduling algorithm, or with a
special
+ * NOPREF locality, which will not be modified
+ *
+ * @param execId the executor Id of the offered resource
+ * @param host the host Id of the offered resource
+ * @param preferredLocality the maximum locality we want to schedule the
tasks at
*/
def resourceOffer(
execId: String,
host: String,
- maxLocality: TaskLocality.TaskLocality)
+ preferredLocality: TaskLocality.TaskLocality)
--- End diff --
This parameter is still referred to as maxLocality elsewhere; please don't rename it.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at infrastructure@apache.org or file a JIRA ticket
with INFRA.
---