Github user dragos commented on a diff in the pull request:

    https://github.com/apache/spark/pull/10924#discussion_r53344844
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/CoarseMesosSchedulerBackend.scala ---
    @@ -254,53 +258,65 @@ private[spark] class CoarseMesosSchedulerBackend(
             val cpus = getResource(offer.getResourcesList, "cpus").toInt
             val id = offer.getId.getValue
             if (meetsConstraints) {
    -          if (taskIdToSlaveId.size < executorLimit &&
    -              totalCoresAcquired < maxCores &&
    -              mem >= calculateTotalMemory(sc) &&
    -              cpus >= 1 &&
    -              failuresBySlaveId.getOrElse(slaveId, 0) < MAX_SLAVE_FAILURES &&
    -              !slaveIdsWithExecutors.contains(slaveId)) {
    -            // Launch an executor on the slave
    -            val cpusToUse = math.min(cpus, maxCores - totalCoresAcquired)
    -            totalCoresAcquired += cpusToUse
    -            val taskId = newMesosTaskId()
    -            taskIdToSlaveId.put(taskId, slaveId)
    -            slaveIdsWithExecutors += slaveId
    -            coresByTaskId(taskId) = cpusToUse
    -            // Gather cpu resources from the available resources and use them in the task.
    -            val (remainingResources, cpuResourcesToUse) =
    -              partitionResources(offer.getResourcesList, "cpus", cpusToUse)
    -            val (_, memResourcesToUse) =
    -              partitionResources(remainingResources.asJava, "mem", calculateTotalMemory(sc))
    -            val taskBuilder = MesosTaskInfo.newBuilder()
    -              .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
    -              .setSlaveId(offer.getSlaveId)
    -              .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave, taskId))
    -              .setName("Task " + taskId)
    -              .addAllResources(cpuResourcesToUse.asJava)
    -              .addAllResources(memResourcesToUse.asJava)
    -
    -            sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
    -              MesosSchedulerBackendUtil
    -                .setupContainerBuilderDockerInfo(image, sc.conf, taskBuilder.getContainerBuilder())
    +          if (totalCoresAcquired < maxCores) {
    +            if (taskIdToSlaveId.size < executorLimit &&
    +                mem >= calculateTotalMemory(sc) &&
    +                cpus >= 1 &&
    +                failuresBySlaveId.getOrElse(slaveId, 0) < MAX_SLAVE_FAILURES &&
    +                !slaveIdsWithExecutors.contains(slaveId)) {
    +              // Launch an executor on the slave
    +              val cpusToUse = math.min(cpus, maxCores - totalCoresAcquired)
    +              totalCoresAcquired += cpusToUse
    +              val taskId = newMesosTaskId()
    +              taskIdToSlaveId.put(taskId, slaveId)
    +              slaveIdsWithExecutors += slaveId
    +              coresByTaskId(taskId) = cpusToUse
    +              // Gather cpu resources from the available resources and use them in the task.
    +              val (remainingResources, cpuResourcesToUse) =
    +                partitionResources(offer.getResourcesList, "cpus", cpusToUse)
    +              val (_, memResourcesToUse) =
    +                partitionResources(remainingResources.asJava, "mem", calculateTotalMemory(sc))
    +              val taskBuilder = MesosTaskInfo.newBuilder()
    +                .setTaskId(TaskID.newBuilder().setValue(taskId.toString).build())
    +                .setSlaveId(offer.getSlaveId)
    +                .setCommand(createCommand(offer, cpusToUse + extraCoresPerSlave, taskId))
    +                .setName("Task " + taskId)
    +                .addAllResources(cpuResourcesToUse.asJava)
    +                .addAllResources(memResourcesToUse.asJava)
    +
    +              sc.conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
    +                MesosSchedulerBackendUtil.setupContainerBuilderDockerInfo(image, sc.conf,
    +                  taskBuilder.getContainerBuilder())
    +              }
    +
    +              // Accept the offer and launch the task
    +              logDebug(s"Accepting offer: $id with attributes: 
$offerAttributes" +
    +                s" mem: $mem cpu: $cpus")
    +              slaveIdToHost(offer.getSlaveId.getValue) = offer.getHostname
    +              d.launchTasks(
    +                Collections.singleton(offer.getId),
    +                Collections.singleton(taskBuilder.build()), filters)
    +            } else {
    +              // Decline the offer
    +              logDebug(s"Declining offer: $id with attributes: 
$offerAttributes" +
    +                s" mem: $mem cpu: $cpus")
    +              d.declineOffer(offer.getId)
                 }
    -
    -            // Accept the offer and launch the task
    -            logDebug(s"Accepting offer: $id with attributes: 
$offerAttributes mem: $mem cpu: $cpus")
    -            slaveIdToHost(offer.getSlaveId.getValue) = offer.getHostname
    -            d.launchTasks(
    -              Collections.singleton(offer.getId),
    -              Collections.singleton(taskBuilder.build()), filters)
               } else {
    -            // Decline the offer
    -            logDebug(s"Declining offer: $id with attributes: 
$offerAttributes mem: $mem cpu: $cpus")
    -            d.declineOffer(offer.getId)
    +            // We reached the maximum number of cores for this framework. We don't need to see
    +            // new offers. Decline the offer for a long period of time.
    +            logDebug(s"Declining offer (reached max cores): $id with 
attributes:" +
    +              s" $offerAttributes mem: $mem cpu: $cpus" +
    +              s" for $rejectOfferDurationForReachedMaxCores seconds")
    +            d.declineOffer(offer.getId, Filters.newBuilder()
    +              .setRefuseSeconds(rejectOfferDurationForReachedMaxCores).build())
    --- End diff --
    
    To me it seems the only thing it depends on is the `offerId`, so it could go in `MesosSchedulerUtils`.
    
    But if that's overkill, let's do it just for this one and get rid of the nested `if` structure. That also means there's no need to use `Option` for the reason and duration.
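
    For illustration, here's a rough sketch of what that shared helper could look like if it moved into `MesosSchedulerUtils`. The trait name, signature, and parameter names below are placeholders, not code from this patch:

    ```scala
    import org.apache.mesos.Protos.{Filters, Offer}
    import org.apache.mesos.SchedulerDriver
    import org.apache.spark.Logging

    // Hypothetical placement: in the real codebase this would sit alongside
    // the other helpers in MesosSchedulerUtils, which already has logging.
    private[mesos] trait OfferDeclineHelper extends Logging {

      // Decline an offer with an explicit reason and refuse duration, so
      // call sites pass plain values rather than Options.
      protected def declineOffer(
          d: SchedulerDriver,
          offer: Offer,
          reason: String,
          refuseSeconds: Long): Unit = {
        logDebug(s"Declining offer: ${offer.getId.getValue} ($reason) " +
          s"for $refuseSeconds seconds")
        d.declineOffer(offer.getId,
          Filters.newBuilder().setRefuseSeconds(refuseSeconds).build())
      }
    }
    ```

    With something like this, each branch that declines with a refuse duration becomes a one-line call with its own reason, and the nested `if` flattens out naturally.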

