Github user squito commented on a diff in the pull request:

    https://github.com/apache/spark/pull/6423#discussion_r32017015
  
    --- Diff: core/src/main/scala/org/apache/spark/shuffle/hash/HashShuffleReader.scala ---
    @@ -33,11 +33,34 @@ private[spark] class HashShuffleReader[K, C](
         "Hash shuffle currently only supports fetching one partition")
     
       private val dep = handle.dependency
    +  private val blockManager = SparkEnv.get.blockManager
     
       /** Read the combined key-values for this reduce task */
       override def read(): Iterator[Product2[K, C]] = {
    +    val blockStreams = BlockStoreShuffleFetcher.fetchBlockStreams(
    +      handle.shuffleId,  startPartition, context)
    +
    +    // Wrap the streams for compression based on configuration
    +    val wrappedStreams = blockStreams.map { case (blockId, inputStream) =>
    +      blockManager.wrapForCompression(blockId, inputStream)
    +    }
    +
         val ser = Serializer.getSerializer(dep.serializer)
    -    val iter = BlockStoreShuffleFetcher.fetch(handle.shuffleId, startPartition, context, ser)
    +    val serializerInstance = ser.newInstance()
    +
    +    // Create a key/value iterator for each stream
    +    val recordIterator = wrappedStreams.flatMap { wrappedStream =>
    +      serializerInstance.deserializeStream(wrappedStream).asKeyValueIterator
    +    }
    +
    +    // Update read metrics for each record materialized
    +    val iter = new InterruptibleIterator[Any](context, recordIterator) {
    +     val readMetrics = context.taskMetrics.createShuffleReadMetricsForDependency()
    +     override def next(): Any = {
    +       readMetrics.incRecordsRead(1)
    +       delegate.next()
    +     }
    +    }.asInstanceOf[Iterator[Nothing]]
    --- End diff --
    
    This version is nice and short, but it does make it a bit hard to follow the types. What do you think of more explicit casting in each branch, to make it clearer what is going on? e.g.:
    
    ```scala
        // Update read metrics for each record materialized
        val iter = new InterruptibleIterator[(Any, Any)](context, recordIterator) {
         val readMetrics = context.taskMetrics.createShuffleReadMetricsForDependency()
         override def next(): (Any, Any) = {
           readMetrics.incRecordsRead(1)
           delegate.next()
         }
        }
    
        val aggregatedIter: Iterator[Product2[K, C]] = if (dep.aggregator.isDefined) {
          if (dep.mapSideCombine) {
            // we are reading values that are already combined
            val combinedKeyValuesIterator = iter.asInstanceOf[Iterator[(K,C)]]
            new InterruptibleIterator(context,
              dep.aggregator.get.combineCombinersByKey(combinedKeyValuesIterator, context))
          } else {
            // we don't know the value type, but also don't care -- the dependency *should*
            // have made sure it's compatible w/ this aggregator, which will convert the value
            // type to the combined type C
            val keyValuesIterator = iter.asInstanceOf[Iterator[(K,Nothing)]]
            new InterruptibleIterator(context,
              dep.aggregator.get.combineValuesByKey(keyValuesIterator, context))
          }
         ...
    ```
    
    this is just an idea ... I'm not entirely convinced myself.
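    
    For reference, here is a minimal, self-contained sketch of the per-branch casting idea. The `CastPerBranchSketch` object, the `mapSideCombine` flag, and the `combineCombiners`/`combineValues` helpers are hypothetical stand-ins for the real aggregator calls; it only illustrates how the explicit casts make the element types visible in each branch:
    
    ```scala
    // A minimal sketch of "explicit cast per branch"; names here are stand-ins,
    // not the actual shuffle code.
    object CastPerBranchSketch {
      def main(args: Array[String]): Unit = {
        // Records as they come out of deserialization: statically untyped pairs.
        val records: Iterator[(Any, Any)] = Iterator(("a", 1), ("b", 2), ("a", 3))
    
        val mapSideCombine = false
    
        // Stand-in for combineCombinersByKey: values are already combined.
        def combineCombiners(it: Iterator[(String, Int)]): Iterator[(String, Int)] = it
    
        // Stand-in for combineValuesByKey: sum the values for each key.
        def combineValues(it: Iterator[(String, Int)]): Iterator[(String, Int)] =
          it.toSeq.groupBy(_._1).iterator.map { case (k, vs) => (k, vs.map(_._2).sum) }
    
        // Each branch states the element type it expects via an explicit cast,
        // so a reader can see what flows into each combine call.
        val aggregated: Iterator[(String, Int)] =
          if (mapSideCombine) {
            combineCombiners(records.asInstanceOf[Iterator[(String, Int)]])
          } else {
            combineValues(records.asInstanceOf[Iterator[(String, Int)]])
          }
    
        aggregated.foreach(println)  // (a,4) and (b,2), in some order
      }
    }
    ```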

