Github user sebastienrainville commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10924#discussion_r53118429
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala ---
    @@ -254,53 +258,65 @@ private[spark] class CoarseMesosSchedulerBackend(
             val cpus = getResource(offer.getResourcesList, "cpus").toInt
             val id = offer.getId.getValue
             if (meetsConstraints) {
    -          if (taskIdToSlaveId.size < executorLimit &&
    -              totalCoresAcquired < maxCores &&
    -              mem >= calculateTotalMemory(sc) &&
    -              cpus >= 1 &&
    -              failuresBySlaveId.getOrElse(slaveId, 0) < MAX_SLAVE_FAILURES &&
    -              !slaveIdsWithExecutors.contains(slaveId)) {
    -            // Launch an executor on the slave
    -            val cpusToUse = math.min(cpus, maxCores - totalCoresAcquired)
    -            totalCoresAcquired += cpusToUse
    -            val taskId = newMesosTaskId()
    -            taskIdToSlaveId.put(taskId, slaveId)
    -            slaveIdsWithExecutors += slaveId
    -            coresByTaskId(taskId) = cpusToUse
    -            // Gather cpu resources from the available resources and use them in the task.
    -            val (remainingResources, cpuResourcesToUse) =
    -              partitionResources(offer.getResourcesList, "cpus", cpusToUse)
    -            val (_, memResourcesToUse) =
    -              partitionResources(remainingResources.asJava, "mem", calculateTotalMemory(sc))
    -            val taskBuilder = MesosTaskInfo.newBuilder()
    -              .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
    -              .setSlaveId(offer.getSlaveId)
    -              .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave, taskId))
    -              .setName("Task " + taskId)
    -              .addAllResources(cpuResourcesToUse.asJava)
    -              .addAllResources(memResourcesToUse.asJava)
    -
    -            sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
    -              MesosSchedulerBackendUtil
    -                .setupContainerBuilderDockerInfo(image, sc.conf, taskBuilder.getContainerBuilder())
    +          if (totalCoresAcquired < maxCores) {
    +            if (taskIdToSlaveId.size < executorLimit &&
    +                mem >= calculateTotalMemory(sc) &&
    +                cpus >= 1 &&
    +                failuresBySlaveId.getOrElse(slaveId, 0) < MAX_SLAVE_FAILURES &&
    +                !slaveIdsWithExecutors.contains(slaveId)) {
    +              // Launch an executor on the slave
    +              val cpusToUse = math.min(cpus, maxCores - totalCoresAcquired)
    +              totalCoresAcquired += cpusToUse
    +              val taskId = newMesosTaskId()
    +              taskIdToSlaveId.put(taskId, slaveId)
    +              slaveIdsWithExecutors += slaveId
    +              coresByTaskId(taskId) = cpusToUse
    +              // Gather cpu resources from the available resources and use them in the task.
    +              val (remainingResources, cpuResourcesToUse) =
    +                partitionResources(offer.getResourcesList, "cpus", cpusToUse)
    +              val (_, memResourcesToUse) =
    +                partitionResources(remainingResources.asJava, "mem", calculateTotalMemory(sc))
    +              val taskBuilder = MesosTaskInfo.newBuilder()
    +                .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
    +                .setSlaveId(offer.getSlaveId)
    +                .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave, taskId))
    +                .setName("Task " + taskId)
    +                .addAllResources(cpuResourcesToUse.asJava)
    +                .addAllResources(memResourcesToUse.asJava)
    +
    +              sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
    +                MesosSchedulerBackendUtil.setupContainerBuilderDockerInfo(image, sc.conf,
    +                  taskBuilder.getContainerBuilder())
    +              }
    +
    +              // Accept the offer and launch the task
    +              logDebug(s"Accepting offer: $id with attributes: $offerAttributes" +
    +                s" mem: $mem cpu: $cpus")
    +              slaveIdToHost(offer.getSlaveId.getValue) = offer.getHostname
    +              d.launchTasks(
    +                Collections.singleton(offer.getId),
    +                Collections.singleton(taskBuilder.build()), filters)
    +            } else {
    +              // Decline the offer
    +              logDebug(s"Declining offer: $id with attributes: $offerAttributes" +
    +                s" mem: $mem cpu: $cpus")
    +              d.declineOffer(offer.getId)
                 }
    -
    -            // Accept the offer and launch the task
    -            logDebug(s"Accepting offer: $id with attributes: $offerAttributes mem: $mem cpu: $cpus")
    -            slaveIdToHost(offer.getSlaveId.getValue) = offer.getHostname
    -            d.launchTasks(
    -              Collections.singleton(offer.getId),
    -              Collections.singleton(taskBuilder.build()), filters)
               } else {
    -            // Decline the offer
    -            logDebug(s"Declining offer: $id with attributes: $offerAttributes mem: $mem cpu: $cpus")
    -            d.declineOffer(offer.getId)
    +            // We reached the maximum number of cores for this framework. We don't need to see
    +            // new offers. Decline the offer for a long period of time.
    +            logDebug(s"Declining offer (reached max cores): $id with attributes:" +
    +              s" $offerAttributes mem: $mem cpu: $cpus" +
    +              s" for $rejectOfferDurationForReachedMaxCores seconds")
    +            d.declineOffer(offer.getId, Filters.newBuilder()
    +              .setRefuseSeconds(rejectOfferDurationForReachedMaxCores).build())
    --- End diff ---
    
    My first implementation actually removed all of the code duplication in the decline-offer path, but it seemed like overkill:
    ```scala
            def declineOffer(reason: Option[String] = None, refuseSeconds: Option[Long] = None): Unit = {
              logDebug("Declining offer" +
                reason.fold("") { r => s" ($r)" } +
                s": $id with attributes: $offerAttributes mem: $mem cpu: $cpus" +
                refuseSeconds.fold("") { r => s" for $r seconds" })

              refuseSeconds match {
                case Some(seconds) =>
                  val filter = Filters.newBuilder().setRefuseSeconds(seconds).build()
                  d.declineOffer(offer.getId, filter)
                case _ => d.declineOffer(offer.getId)
              }
            }
    ```
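
    For reference, a rough sketch of how it could be called from the two decline paths in the diff above, assuming the helper is defined inside the offer loop (so it can close over `id`, `offerAttributes`, `mem` and `cpus`) and that `rejectOfferDurationForReachedMaxCores` is a `Long`:
    ```scala
            // Hypothetical call sites mirroring the diff: a long rejection when the
            // framework has reached max cores, a plain decline otherwise.
            if (totalCoresAcquired >= maxCores) {
              declineOffer(Some("reached max cores"),
                Some(rejectOfferDurationForReachedMaxCores))
            } else {
              declineOffer()
            }
    ```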
    
    Also, this can't easily be reused in fine-grained mode, since it relies on attributes computed locally in the loop. I opted for simplicity, expecting that this whole function would be refactored at some point. I'm happy to use the implementation above for `refuseOffer` if you think it's better. It could be simplified quite a bit if it only covered the two cases where offers are rejected for a longer period, but then we would still have similar code for the default reject.
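
    For illustration, a minimal sketch of that simplified variant, covering only the long-rejection cases (the name `declineOfferForLongPeriod` is made up here; the default reject would keep its existing inline code):
    ```scala
            // Hypothetical helper for the two long-rejection cases only; it reuses the
            // same log format and Filters-based decline shown in the diff above.
            def declineOfferForLongPeriod(reason: String, refuseSeconds: Long): Unit = {
              logDebug(s"Declining offer ($reason): $id with attributes: $offerAttributes" +
                s" mem: $mem cpu: $cpus for $refuseSeconds seconds")
              d.declineOffer(offer.getId,
                Filters.newBuilder().setRefuseSeconds(refuseSeconds).build())
            }
    ```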

