Github user markhamstra commented on a diff in the pull request:

    https://github.com/apache/spark/pull/186#discussion_r11862359
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala 
---
    @@ -519,129 +494,6 @@ class DAGScheduler(
       }
     
       /**
    -   * Process one event retrieved from the event processing actor.
    -   *
    -   * @param event The event to be processed.
    -   * @return `true` if we should stop the event loop.
    -   */
    -  private[scheduler] def processEvent(event: DAGSchedulerEvent): Boolean = 
{
    -    event match {
    -      case JobSubmitted(jobId, rdd, func, partitions, allowLocal, 
callSite, listener, properties) =>
    -        var finalStage: Stage = null
    -        try {
    -          // New stage creation may throw an exception if, for example, 
jobs are run on a HadoopRDD
    -          // whose underlying HDFS files have been deleted.
    -          finalStage = newStage(rdd, partitions.size, None, jobId, 
Some(callSite))
    -        } catch {
    -          case e: Exception =>
    -            logWarning("Creating new stage failed due to exception - job: 
" + jobId, e)
    -            listener.jobFailed(e)
    -            return false
    -        }
    -        val job = new ActiveJob(jobId, finalStage, func, partitions, 
callSite, listener, properties)
    -        clearCacheLocs()
    -        logInfo("Got job " + job.jobId + " (" + callSite + ") with " + 
partitions.length +
    -                " output partitions (allowLocal=" + allowLocal + ")")
    -        logInfo("Final stage: " + finalStage + " (" + finalStage.name + 
")")
    -        logInfo("Parents of final stage: " + finalStage.parents)
    -        logInfo("Missing parents: " + getMissingParentStages(finalStage))
    -        if (allowLocal && finalStage.parents.size == 0 && 
partitions.length == 1) {
    -          // Compute very short actions like first() or take() with no 
parent stages locally.
    -          listenerBus.post(SparkListenerJobStart(job.jobId, Array[Int](), 
properties))
    -          runLocally(job)
    -        } else {
    -          jobIdToActiveJob(jobId) = job
    -          activeJobs += job
    -          resultStageToJob(finalStage) = job
    -          listenerBus.post(
    -            SparkListenerJobStart(job.jobId, 
jobIdToStageIds(jobId).toArray, properties))
    -          submitStage(finalStage)
    -        }
    -
    -      case StageCancelled(stageId) =>
    -        handleStageCancellation(stageId)
    --- End diff --
    
    This event didn't make it into the `receive` of your 
`DAGSchedulerEventProcessActor`, and it is definitely needed.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes to enable it, or if the feature is enabled but not working,
please contact infrastructure at [email protected] or file a JIRA
ticket with INFRA.
---

Reply via email to