Github user tdas commented on a diff in the pull request:

    https://github.com/apache/spark/pull/7276#discussion_r35286631
  
    --- Diff: streaming/src/main/scala/org/apache/spark/streaming/scheduler/ReceiverTracker.scala ---
    @@ -258,171 +319,242 @@ class ReceiverTracker(ssc: StreamingContext, skipReceiverLaunch: Boolean = false
         logWarning(s"Error reported by receiver for stream $streamId: $messageWithError")
       }
     
    +  private def scheduleReceiver(receiverId: Int): Seq[String] = {
    +    val preferredLocation = receiverPreferredLocations.getOrElse(receiverId, None)
    +    val scheduledLocations = schedulingPolicy.rescheduleReceiver(
    +      receiverId, preferredLocation, receiverTrackingInfos, getExecutors)
    +    updateReceiverScheduledLocations(receiverId, scheduledLocations)
    +    scheduledLocations
    +  }
    +
    +  private def updateReceiverScheduledLocations(
    +      receiverId: Int, scheduledLocations: Seq[String]): Unit = {
    +    val newReceiverTrackingInfo = receiverTrackingInfos.get(receiverId) match {
    +      case Some(oldInfo) =>
    +        oldInfo.copy(state = ReceiverState.SCHEDULED,
    +          scheduledLocations = Some(scheduledLocations))
    +      case None =>
    +        ReceiverTrackingInfo(
    +          receiverId,
    +          ReceiverState.SCHEDULED,
    +          Some(scheduledLocations),
    +          None)
    +    }
    +    receiverTrackingInfos.put(receiverId, newReceiverTrackingInfo)
    +  }
    +
       /** Check if any blocks are left to be processed */
       def hasUnallocatedBlocks: Boolean = {
         receivedBlockTracker.hasUnallocatedReceivedBlocks
       }
     
    +  /**
    +   * Get the list of executors excluding driver
    +   */
    +  private def getExecutors: Seq[String] = {
    +    if (ssc.sc.isLocal) {
    +      Seq(ssc.sparkContext.env.blockManager.blockManagerId.hostPort)
    +    } else {
    +      ssc.sparkContext.env.blockManager.master.getMemoryStatus.filter { case (blockManagerId, _) =>
    +        blockManagerId.executorId != SparkContext.DRIVER_IDENTIFIER // Ignore the driver location
    +      }.map { case (blockManagerId, _) => blockManagerId.hostPort }.toSeq
    +    }
    +  }
    +
    +  /**
    +   * Run the dummy Spark job to ensure that all slaves have registered. This prevents all the
    +   * receivers from being scheduled on the same node.
    +   *
    +   * TODO Should poll the executor number and wait for executors according to
    +   * "spark.scheduler.minRegisteredResourcesRatio" and
    +   * "spark.scheduler.maxRegisteredResourcesWaitingTime" rather than running a dummy job.
    +   */
    +  private def runDummySparkJob(): Unit = {
    +    if (!ssc.sparkContext.isLocal) {
    +      ssc.sparkContext.makeRDD(1 to 50, 50).map(x => (x, 1)).reduceByKey(_ + _, 20).collect()
    +    }
    +    assert(getExecutors.nonEmpty)
    +  }
    +
    +  /**
    +   * Get the receivers from the ReceiverInputDStreams, distribute them to the
    +   * worker nodes as a parallel collection, and run them.
    +   */
    +  private def launchReceivers(): Unit = {
    +    val receivers = receiverInputStreams.map(nis => {
    +      val rcvr = nis.getReceiver()
    +      rcvr.setReceiverId(nis.id)
    +      rcvr
    +    })
    +
    +    runDummySparkJob()
    +
    +    logInfo("Starting " + receivers.length + " receivers")
    +    endpoint.send(StartAllReceivers(receivers))
    +  }
    +
    +  /** Check if tracker has been marked for starting */
    +  private def isTrackerStarted: Boolean = trackerState == Started
    +
    +  /** Check if tracker has been marked for stopping */
    +  private def isTrackerStopping: Boolean = trackerState == Stopping
    +
    +  /** Check if tracker has been marked as stopped */
    +  private def isTrackerStopped: Boolean = trackerState == Stopped
    +
       /** RpcEndpoint to receive messages from the receivers. */
       private class ReceiverTrackerEndpoint(override val rpcEnv: RpcEnv) extends ThreadSafeRpcEndpoint {
     
    +    // TODO Remove this thread pool after https://github.com/apache/spark/issues/7385 is merged
    --- End diff --
    
    Is there any drawback to implementing it the current way? Is it that with
    100 receivers, there will be 100 threads stuck?
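
    For reference, a minimal standalone sketch (not the PR's code; the names and
    counts here are made up) of the pattern in question: a pool that starts each
    receiver with a task blocking until that receiver terminates ends up holding
    one parked thread per receiver for as long as the receivers run.

    ```scala
    import java.util.concurrent.{CountDownLatch, Executors, ThreadPoolExecutor, TimeUnit}

    object ThreadPerReceiverSketch {
      def main(args: Array[String]): Unit = {
        val numReceivers = 100                 // hypothetical receiver count
        val receiversStopped = new CountDownLatch(1)
        val pool = Executors.newCachedThreadPool().asInstanceOf[ThreadPoolExecutor]

        (0 until numReceivers).foreach { _ =>
          pool.execute(new Runnable {
            override def run(): Unit = {
              // Stand-in for the blocking "run this receiver" call: the thread
              // stays parked here until the receiver is told to stop.
              receiversStopped.await()
            }
          })
        }

        Thread.sleep(500)  // let the pool create its threads
        println(s"Threads pinned by receivers: ${pool.getPoolSize}")  // roughly 100

        receiversStopped.countDown()           // "stop" all receivers
        pool.shutdown()
        pool.awaitTermination(5, TimeUnit.SECONDS)
      }
    }
    ```

    If that reading is right, those pool threads do nothing but wait while the
    receivers run.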

