Github user tgravescs commented on a diff in the pull request:

    https://github.com/apache/spark/pull/11241#discussion_r55564719
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/BlockManager.scala 
---
    @@ -555,28 +563,49 @@ private[spark] class BlockManager(
         preferredLocs ++ otherLocs
       }
     
    -  private def doGetRemote(blockId: BlockId, asBlockResult: Boolean): 
Option[Any] = {
    +  /**
    +   * VisibleForTesting
    +   */
    +  private[spark] def doGetRemote(blockId: BlockId, blockTransferService: 
BlockTransferService,
    +                                 asBlockResult: Boolean): Option[Any] = {
         require(blockId != null, "BlockId is null")
    +    var runningFailureCount = 0
    +    var totalFailureCount = 0
         val locations = getLocations(blockId)
    -    var numFetchFailures = 0
    -    for (loc <- locations) {
    +    val maxFetchFailures = locations.size
    +    var locationIterator = locations.iterator
    +    while (locationIterator.hasNext) {
    +      val loc = locationIterator.next()
           logDebug(s"Getting remote block $blockId from $loc")
           val data = try {
             blockTransferService.fetchBlockSync(
               loc.host, loc.port, loc.executorId, 
blockId.toString).nioByteBuffer()
           } catch {
             case NonFatal(e) =>
    -          numFetchFailures += 1
    -          if (numFetchFailures == locations.size) {
    +          runningFailureCount += 1
    +          totalFailureCount += 1
    +
    +          if (totalFailureCount >= maxFetchFailures) {
                 // An exception is thrown while fetching this block from all 
locations
    -            throw new BlockFetchException(s"Failed to fetch block from" +
    -              s" ${locations.size} locations. Most recent failure cause:", 
e)
    -          } else {
    -            // This location failed, so we retry fetch from a different 
one by returning null here
    -            logWarning(s"Failed to fetch remote block $blockId " +
    -              s"from $loc (failed attempt $numFetchFailures)", e)
    -            null
    +            throw new BlockFetchException(s"Failed to fetch block after" +
    +              s" ${totalFailureCount} fetch failures. Most recent failure 
cause:", e)
    +          }
    +
    +          // This location failed, so we retry fetch from a different one 
by returning null here
    +          logWarning(s"Failed to fetch remote block $blockId " +
    +            s"from $loc (failed attempt $runningFailureCount)", e)
    +
    +          // If there is a large number of executors then locations list 
can contain a
    +          // large number of stale entries causing a large number of 
retries that may
    +          // take a significant amount of time. To get rid of these stale 
entries
    +          // we refresh the block locations after a certain number of 
fetch failures
    +          if (runningFailureCount >= 
BlockManagerConfiguration.maxFailuresBeforeLocationRefresh) {
    +            locationIterator = 
Random.shuffle(master.getLocations(blockId)).iterator
    --- End diff --
    
    There is a new `getLocations()` helper function in this class that should be used here instead of calling `master.getLocations(blockId)` directly.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to