Github user andrewor14 commented on a diff in the pull request:

    https://github.com/apache/spark/pull/186#discussion_r11713717
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala ---
    @@ -1152,13 +1006,158 @@ class DAGScheduler(
       }
     
       def stop() {
    -    if (eventProcessActor != null) {
    -      eventProcessActor ! StopDAGScheduler
    -    }
    +    logInfo("Stopping DAGScheduler")
    +    dagSchedulerActorSupervisor ! PoisonPill
         taskScheduler.stop()
       }
     }
     
    +private[scheduler] class DAGSchedulerActorSupervisor(dagScheduler: DAGScheduler)
    +  extends Actor with Logging {
    +
    +  override val supervisorStrategy =
    +    OneForOneStrategy() {
    +      case x: Exception => {
    +        logError("eventProcesserActor failed due to the error %s; shutting 
down SparkContext"
    +          .format(x.getMessage))
    +        dagScheduler.doCancelAllJobs()
    +        dagScheduler.sc.stop()
    +        Stop
    +      }
    +    }
    +
    +  def receive = {
    +    case p: Props => sender ! context.actorOf(p)
    +    case _ =>
    +  }
    +
    +  dagScheduler.eventProcessActor = context.actorOf(
    +    Props(new DAGSchedulerEventProcessActor(dagScheduler)))
    +}
    +
    +private[scheduler] class DAGSchedulerEventProcessActor(dagScheduler: DAGScheduler)
    +  extends Actor with Logging {
    +
    +  override def preStart() {
    +    // set DAGScheduler for taskScheduler to ensure eventProcessActor is always
    +    // valid when the messages arrive
    +    dagScheduler.taskScheduler.setDAGScheduler(dagScheduler)
    +  }
    +
    +  /**
    +   * The main event loop of the DAG scheduler.
    +   */
    +  def receive = {
    +    case JobSubmitted(jobId, rdd, func, partitions, allowLocal, callSite, listener, properties) =>
    +      var finalStage: Stage = null
    +      try {
    +        // New stage creation may throw an exception if, for example, jobs are run on a
    +        // HadoopRDD whose underlying HDFS files have been deleted.
    +        finalStage = dagScheduler.newStage(rdd, partitions.size, None, jobId, Some(callSite))
    +      } catch {
    +        case e: Exception =>
    +          logWarning("Creating new stage failed due to exception - job: " 
+ jobId, e)
    +          listener.jobFailed(e)
    +      }
    +      val job = new ActiveJob(jobId, finalStage, func, partitions, callSite, listener, properties)
    +      dagScheduler.clearCacheLocs()
    +      logInfo("Got job " + job.jobId + " (" + callSite + ") with " + 
partitions.length +
    +        " output partitions (allowLocal=" + allowLocal + ")")
    --- End diff ---
    
    super small nit: This looks a little clumsy. I would do something like
    `"Got job %s (%s) with %d output partitions (allowLocal=%s)".format(job.jobId, callSite, partitions.length, allowLocal)`
    or
    `s"Got job ${job.jobId} ($callSite) with ${partitions.length} output partitions (allowLocal=$allowLocal)"`
    
    If you decide to make this change, we should use the same string formatting elsewhere in this file.
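
    For example, the `logInfo` call above would then read something like this (just a quick sketch, assuming we go with the interpolated form):

        logInfo(s"Got job ${job.jobId} ($callSite) with ${partitions.length} " +
          s"output partitions (allowLocal=$allowLocal)")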

