Github user zsxwing commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8180#discussion_r38916631
  
    --- Diff: core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala ---
    @@ -720,31 +843,82 @@ class DAGScheduler(
         try {
           // New stage creation may throw an exception if, for example, jobs are run on a
           // HadoopRDD whose underlying HDFS files have been deleted.
    -      finalStage = newResultStage(finalRDD, partitions.length, jobId, callSite)
    +      finalStage = newResultStage(finalRDD, func, partitions, jobId, callSite)
         } catch {
           case e: Exception =>
             logWarning("Creating new stage failed due to exception - job: " + jobId, e)
             listener.jobFailed(e)
             return
         }
    -    if (finalStage != null) {
    -      val job = new ActiveJob(jobId, finalStage, func, partitions, callSite, listener, properties)
    -      clearCacheLocs()
    -      logInfo("Got job %s (%s) with %d output partitions".format(
    -        job.jobId, callSite.shortForm, partitions.length))
    -      logInfo("Final stage: " + finalStage + "(" + finalStage.name + ")")
    -      logInfo("Parents of final stage: " + finalStage.parents)
    -      logInfo("Missing parents: " + getMissingParentStages(finalStage))
    -      val jobSubmissionTime = clock.getTimeMillis()
    -      jobIdToActiveJob(jobId) = job
    -      activeJobs += job
    -      finalStage.resultOfJob = Some(job)
    -      val stageIds = jobIdToStageIds(jobId).toArray
    -      val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    +
    +    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    +    clearCacheLocs()
    +    logInfo("Got job %s (%s) with %d output partitions".format(
    +      job.jobId, callSite.shortForm, partitions.length))
    +    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    +    logInfo("Parents of final stage: " + finalStage.parents)
    +    logInfo("Missing parents: " + getMissingParentStages(finalStage))
    +
    +    val jobSubmissionTime = clock.getTimeMillis()
    +    jobIdToActiveJob(jobId) = job
    +    activeJobs += job
    +    finalStage.resultOfJob = Some(job)
    +    val stageIds = jobIdToStageIds(jobId).toArray
    +    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    +    listenerBus.post(
    +      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    +    submitStage(finalStage)
    +
    +    submitWaitingStages()
    +  }
    +
    +  private[scheduler] def handleMapStageSubmitted(jobId: Int,
    +      dependency: ShuffleDependency[_, _, _],
    +      callSite: CallSite,
    +      listener: JobListener,
    +      properties: Properties) {
    +    // Submitting this map stage might still require the creation of some parent stages, so make
    +    // sure that happens.
    +    var finalStage: ShuffleMapStage = null
    +    try {
    +      // New stage creation may throw an exception if, for example, jobs are run on a
    +      // HadoopRDD whose underlying HDFS files have been deleted.
    +      finalStage = getShuffleMapStage(dependency, jobId)
    +    } catch {
    +      case e: Exception =>
    +        logWarning("Creating new stage failed due to exception - job: " + jobId, e)
    +        listener.jobFailed(e)
    +        return
    +    }
    +
    +    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    +    clearCacheLocs()
    +    logInfo("Got map stage job %s (%s) with %d output partitions".format(
    +      jobId, callSite.shortForm, dependency.rdd.partitions.size))
    +    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    +    logInfo("Parents of final stage: " + finalStage.parents)
    +    logInfo("Missing parents: " + getMissingParentStages(finalStage))
    +
    +    val jobSubmissionTime = clock.getTimeMillis()
    +    jobIdToActiveJob(jobId) = job
    +    activeJobs += job
    +    finalStage.mapStageJobs = job :: finalStage.mapStageJobs
    +    val stageIds = jobIdToStageIds(jobId).toArray
    +    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    +    listenerBus.post(
    +      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    +    submitStage(finalStage)
    +
    +    // If the whole stage has already finished, tell the listener and remove it
    +    if (!finalStage.outputLocs.contains(Nil)) {
    +      job.finished(0) = true
    +      job.numFinished += 1
    --- End diff --
    
    This just feels a bit weird here, since `job.finished` and `job.numFinished` are meaningless for a `submitMapStage` job. Maybe it's better to add some comments here.
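    
    Something along these lines, for example. This is only a sketch of the wording, not part of the patch, and it assumes my reading is right that a map-stage job is tracked as an `ActiveJob` with a single logical partition, the stage itself:
    
    ```scala
    // If the whole stage has already finished, tell the listener and remove it
    if (!finalStage.outputLocs.contains(Nil)) {
      // A map-stage job is tracked as an ActiveJob with exactly one logical
      // "partition": the map stage itself. All of the stage's map outputs are
      // already available (no entry of outputLocs is Nil), so mark that single
      // partition as finished and count it, letting the shared ActiveJob
      // bookkeeping treat the job as complete right away.
      job.finished(0) = true
      job.numFinished += 1
      // ... (rest as in the patch)
    }
    ```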

