Github user mateiz commented on a diff in the pull request:

    https://github.com/apache/spark/pull/186#discussion_r11918236
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala ---
    @@ -1152,13 +1003,160 @@ class DAGScheduler(
       }
     
       def stop() {
    -    if (eventProcessActor != null) {
    -      eventProcessActor ! StopDAGScheduler
    -    }
    +    logInfo("Stopping DAGScheduler")
    +    dagSchedulerActorSupervisor ! PoisonPill
         taskScheduler.stop()
       }
     }
     
    +private[scheduler] class DAGSchedulerActorSupervisor(dagScheduler: DAGScheduler)
    +  extends Actor with Logging {
    +
    +  override val supervisorStrategy =
    +    OneForOneStrategy() {
    +      case x: Exception =>
    +        logError("eventProcessActor failed due to the error %s; shutting down SparkContext"
    +          .format(x.getMessage))
    +        dagScheduler.doCancelAllJobs()
    +        dagScheduler.sc.stop()
    +        Stop
    +    }
    +
    +  def receive = {
    +    case p: Props => sender ! context.actorOf(p)
    +    case _ => logWarning("received unknown message in DAGSchedulerActorSupervisor")
    +  }
    +
    +  dagScheduler.eventProcessActor = context.actorOf(
    +    Props(new DAGSchedulerEventProcessActor(dagScheduler)))
    +}
    +
    +private[scheduler] class DAGSchedulerEventProcessActor(dagScheduler: DAGScheduler)
    +  extends Actor with Logging {
    +
    +  override def preStart() {
    +    // set DAGScheduler for taskScheduler to ensure eventProcessActor is always
    +    // valid when the messages arrive
    +    dagScheduler.taskScheduler.setDAGScheduler(dagScheduler)
    +  }
    +
    +  /**
    +   * The main event loop of the DAG scheduler.
    +   */
    +  def receive = {
    +    case JobSubmitted(jobId, rdd, func, partitions, allowLocal, callSite, listener, properties) =>
    +      var finalStage: Stage = null
    +      try {
    +        // New stage creation may throw an exception if, for example, jobs are run on a
    +        // HadoopRDD whose underlying HDFS files have been deleted.
    +        finalStage = dagScheduler.newStage(rdd, partitions.size, None, jobId, Some(callSite))
    +      } catch {
    +        case e: Exception =>
    +          logWarning("Creating new stage failed due to exception - job: " 
+ jobId, e)
    +          listener.jobFailed(e)
    +      }
    +      val job = new ActiveJob(jobId, finalStage, func, partitions, callSite, listener, properties)
    +      dagScheduler.clearCacheLocs()
    +      logInfo("Got job %s (%s) with %d output partitions (allowLocal=%s)".
    +        format(job.jobId, callSite, partitions.length, allowLocal))
    +      logInfo("Final stage: " + finalStage + "(" + finalStage.name + ")")
    +      logInfo("Parents of final stage: " + finalStage.parents)
    +      logInfo("Missing parents: " + 
dagScheduler.getMissingParentStages(finalStage))
    +      if (allowLocal && finalStage.parents.size == 0 && partitions.length 
== 1) {
    +        // Compute very short actions like first() or take() with no parent stages locally.
    +        dagScheduler.listenerBus.post(SparkListenerJobStart(job.jobId, Array[Int](), properties))
    +        dagScheduler.runLocally(job)
    +      } else {
    +        dagScheduler.jobIdToActiveJob(jobId) = job
    +        dagScheduler.activeJobs += job
    +        dagScheduler.resultStageToJob(finalStage) = job
    +        dagScheduler.listenerBus.post(
    +          SparkListenerJobStart(job.jobId, dagScheduler.jobIdToStageIds(jobId).toArray,
    +            properties))
    +        dagScheduler.submitStage(finalStage)
    +      }
    +
    +    case StageCancelled(stageId) =>
    +      dagScheduler.handleStageCancellation(stageId)
    +
    +    case JobCancelled(jobId) =>
    +      dagScheduler.handleJobCancellation(jobId)
    +
    +    case JobGroupCancelled(groupId) =>
    +      // Cancel all jobs belonging to this job group.
    +      // First find all active jobs with this group id, and then cancel the stages for them.
    +      val activeInGroup = dagScheduler.activeJobs.filter(activeJob =>
    +        groupId == activeJob.properties.get(SparkContext.SPARK_JOB_GROUP_ID))
    +      val jobIds = activeInGroup.map(_.jobId)
    +      jobIds.foreach(dagScheduler.handleJobCancellation(_, "part of cancel job group"))
    +
    +    case AllJobsCancelled =>
    +      dagScheduler.doCancelAllJobs()
    +
    +    case ExecutorAdded(execId, host) =>
    +      dagScheduler.handleExecutorAdded(execId, host)
    +
    +    case ExecutorLost(execId) =>
    +      dagScheduler.handleExecutorLost(execId)
    +
    +    case BeginEvent(task, taskInfo) =>
    +      for (
    +        job <- dagScheduler.jobIdToActiveJob.get(task.stageId);
    +        stage <- dagScheduler.stageIdToStage.get(task.stageId);
    +        stageInfo <- dagScheduler.stageToInfos.get(stage)
    --- End diff --
    
    This is different from the code you removed above -- you're now iterating over `job` too; is that actually needed?
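    
    For context on why the extra generator changes behavior: in a Scala for-comprehension over `Option`s, each generator acts as a filter, so the body runs only when every lookup returns `Some`. A minimal standalone sketch below (hypothetical maps standing in for the scheduler's internal state, not the actual Spark code) shows that adding the `job <- ...` generator silently skips the event when no active job is registered for the stage:
    
        object ForComprehensionFilter {
          // Hypothetical lookup tables; stage 2 has a Stage but no ActiveJob.
          val jobIdToActiveJob = Map(1 -> "job-1")
          val stageIdToStage = Map(1 -> "stage-1", 2 -> "stage-2")
    
          def handleBeginEvent(stageId: Int): Unit = {
            for (
              job <- jobIdToActiveJob.get(stageId);   // None here short-circuits...
              stage <- stageIdToStage.get(stageId)    // ...so this is never evaluated
            ) {
              println(s"handling BeginEvent for $job / $stage")
            }
          }
    
          def main(args: Array[String]): Unit = {
            handleBeginEvent(1) // prints: handling BeginEvent for job-1 / stage-1
            handleBeginEvent(2) // prints nothing: the stage exists, but no job matches
          }
        }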

