Ngone51 commented on a change in pull request #27773: [SPARK-29154][CORE]
Update Spark scheduler for stage level scheduling
URL: https://github.com/apache/spark/pull/27773#discussion_r390073662
##########
File path:
core/src/main/scala/org/apache/spark/scheduler/TaskSchedulerImpl.scala
##########
@@ -381,12 +388,85 @@ private[spark] class TaskSchedulerImpl(
/**
* Check whether the resources from the WorkerOffer are enough to run at
least one task.
+ * Returns None if the resources don't meet the task requirements, otherwise
returns
+ * the task resource assignments to give to the next task. Note that the
assignments may
+ * be empty if no custom resources are used.
*/
- private def resourcesMeetTaskRequirements(resources: Map[String,
Buffer[String]]): Boolean = {
- val resourcesFree = resources.map(r => r._1 -> r._2.length)
- val meetsReqs = ResourceUtils.resourcesMeetRequirements(resourcesFree,
resourcesReqsPerTask)
- logDebug(s"Resources meet task requirements is: $meetsReqs")
- meetsReqs
+ private def resourcesMeetTaskRequirements(
+ taskSet: TaskSetManager,
+ availCpus: Int,
+ availWorkerResources: Map[String, Buffer[String]]
+ ): Option[Map[String, ResourceInformation]] = {
+ val rpId = taskSet.taskSet.resourceProfileId
+ val taskCpus = sc.resourceProfileManager.taskCpusForProfileId(rpId)
+ // check if the ResourceProfile has cpus first since that is common case
+ if (availCpus < taskCpus) return None
+
+ val taskSetProf = sc.resourceProfileManager.resourceProfileFromId(rpId)
+ // remove task cpus since we checked already
+ val tsResources =
taskSetProf.taskResources.filterKeys(!_.equals(ResourceProfile.CPUS))
+ val localTaskReqAssign = HashMap[String, ResourceInformation]()
+ if (tsResources.isEmpty) return Some(localTaskReqAssign.toMap)
+ // we go through all resources here so that we can make sure they match
and also get what the
+ // assignments are for the next task
+ for ((rName, taskReqs) <- tsResources) {
+ val taskAmount = taskSetProf.getSchedulerTaskResourceAmount(rName)
+ availWorkerResources.get(rName) match {
+ case Some(workerRes) =>
+ val workerAvail =
availWorkerResources.get(rName).map(_.size).getOrElse(0)
+ if (workerAvail >= taskAmount) {
+ localTaskReqAssign.put(rName, new ResourceInformation(rName,
+ workerRes.take(taskAmount).toArray))
+ } else {
+ return None
+ }
+ case None => return None
+ }
+ }
+ Some(localTaskReqAssign.toMap)
+ }
+
+ // Use the resource that the resourceProfile has as the limiting resource to
calculate the
+ // total number of slots available based on the current offers.
+ private def calculateAvailableSlots(
Review comment:
> unless it got checked in without me seeing it?
Barrier mode doesn't support dynamic allocation yet.
But sorry I don't quite understand your concern here.
With my understanding, `calculateAvailableSlots` only gives a barrier stage
a chance to try to launch all of its tasks at the same time. But it never
guarantees that a barrier stage can be successfully launched even if there
are enough slots (e.g. due to delay scheduling). So, I mean, whether we
calculate the total or a partial sum of resource slots, it should make no
difference for barrier stage scheduling.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]