GitHub user cloud-fan commented on a diff in the pull request:

    https://github.com/apache/spark/pull/16989#discussion_r115928177
  
    --- Diff: core/src/main/scala/org/apache/spark/storage/ShuffleBlockFetcherIterator.scala ---
    @@ -175,33 +193,54 @@ final class ShuffleBlockFetcherIterator(
         val sizeMap = req.blocks.map { case (blockId, size) => (blockId.toString, size) }.toMap
         val remainingBlocks = new HashSet[String]() ++= sizeMap.keys
         val blockIds = req.blocks.map(_._1.toString)
    -
         val address = req.address
    -    shuffleClient.fetchBlocks(address.host, address.port, address.executorId, blockIds.toArray,
    -      new BlockFetchingListener {
    -        override def onBlockFetchSuccess(blockId: String, buf: ManagedBuffer): Unit = {
    -          // Only add the buffer to the results queue if the iterator is not zombie,
    -          // i.e. cleanup() has not been called yet.
    -          ShuffleBlockFetcherIterator.this.synchronized {
    -            if (!isZombie) {
    -              // Increment the ref count because we need to pass this to a different thread.
    -              // This needs to be released after use.
    -              buf.retain()
    -              remainingBlocks -= blockId
    -              results.put(new SuccessFetchResult(BlockId(blockId), address, sizeMap(blockId), buf,
    -                remainingBlocks.isEmpty))
    -              logDebug("remainingBlocks: " + remainingBlocks)
    -            }
    +
    +    val blockFetchingListener = new BlockFetchingListener {
    +      override def onBlockFetchSuccess(blockId: String, buf: ManagedBuffer): Unit = {
    +        // Only add the buffer to the results queue if the iterator is not zombie,
    +        // i.e. cleanup() has not been called yet.
    +        ShuffleBlockFetcherIterator.this.synchronized {
    +          if (!isZombie) {
    +            // Increment the ref count because we need to pass this to a different thread.
    +            // This needs to be released after use.
    +            buf.retain()
    +            remainingBlocks -= blockId
    +            results.put(new SuccessFetchResult(BlockId(blockId), address, sizeMap(blockId), buf,
    +              remainingBlocks.isEmpty))
    +            logDebug("remainingBlocks: " + remainingBlocks)
               }
    -          logTrace("Got remote block " + blockId + " after " + Utils.getUsedTimeMs(startTime))
             }
    +        logTrace("Got remote block " + blockId + " after " + Utils.getUsedTimeMs(startTime))
    +      }
     
    -        override def onBlockFetchFailure(blockId: String, e: Throwable): Unit = {
    -          logError(s"Failed to get block(s) from ${req.address.host}:${req.address.port}", e)
    -          results.put(new FailureFetchResult(BlockId(blockId), address, e))
    -        }
    +      override def onBlockFetchFailure(blockId: String, e: Throwable): Unit = {
    +        logError(s"Failed to get block(s) from ${req.address.host}:${req.address.port}", e)
    +        results.put(new FailureFetchResult(BlockId(blockId), address, e))
           }
    -    )
    +    }
    +
    +    // Fetch remote shuffle blocks to disk when the request is too large or local memory is short.
    +    val fetchToDisk = if (bytesInFlight > maxBytesInFlight) {
    --- End diff ---
    
    Yeah, this is a good point. My only concern is that a giant shuffle block may take all the memory and cause a lot of spilling for downstream operators like join, aggregate, etc.
    
    How about we add a new config to set a threshold?
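    
    To make the idea concrete, here is a minimal sketch of how such a threshold could gate the fetch-to-disk decision. The config name, the 200 MiB default, and the helper object below are illustrative assumptions for discussion, not the final API:
    
    ```scala
    // Sketch only: the threshold value and the helper names are hypothetical.
    object FetchToDiskSketch {
      // Assumed threshold (e.g. read from a new config such as
      // "spark.reducer.maxReqSizeShuffleToMem"): requests above this size are
      // fetched to disk so one giant block cannot occupy all execution memory.
      val maxReqSizeShuffleToMem: Long = 200L * 1024 * 1024
    
      // Fetch to disk when we are already over the in-flight budget, or when
      // this single request exceeds the threshold.
      def shouldFetchToDisk(reqSize: Long, bytesInFlight: Long, maxBytesInFlight: Long): Boolean =
        bytesInFlight > maxBytesInFlight || reqSize > maxReqSizeShuffleToMem
    
      def main(args: Array[String]): Unit = {
        // A 1 GiB request trips the threshold even with nothing else in flight.
        println(shouldFetchToDisk(1L << 30, bytesInFlight = 0L, maxBytesInFlight = 48L * 1024 * 1024)) // true
      }
    }
    ```
    
    This would keep the common case (small blocks) in memory, so only the outliers pay the disk round-trip instead of one oversized block evicting memory that joins and aggregates would otherwise use.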

