Github user JoshRosen commented on a diff in the pull request:

    https://github.com/apache/spark/pull/8829#discussion_r40033212
  
    --- Diff: 
core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleManager.scala ---
    @@ -19,10 +19,97 @@ package org.apache.spark.shuffle.sort
     
     import java.util.concurrent.ConcurrentHashMap
     
    -import org.apache.spark.{Logging, SparkConf, TaskContext, 
ShuffleDependency}
    +import org.apache.spark._
    +import org.apache.spark.serializer.Serializer
     import org.apache.spark.shuffle._
    -import org.apache.spark.shuffle.hash.HashShuffleReader
     
    +/**
    + * Subclass of [[BaseShuffleHandle]], used to identify when we've chosen 
to use the new shuffle.
    + */
    +private[spark] class UnsafeShuffleHandle[K, V](
    +    shuffleId: Int,
    +    numMaps: Int,
    +    dependency: ShuffleDependency[K, V, V])
    +  extends BaseShuffleHandle(shuffleId, numMaps, dependency) {
    +}
    +
    +private[spark] object SortShuffleManager extends Logging {
    +
    +  /**
    +   * The maximum number of shuffle output partitions that 
UnsafeShuffleManager supports.
    +   */
    +  val MAX_SHUFFLE_OUTPUT_PARTITIONS = 
PackedRecordPointer.MAXIMUM_PARTITION_ID + 1
    +
    +  /**
    +   * Helper method for determining whether a shuffle should use the 
optimized unsafe shuffle
    +   * path or whether it should fall back to the original sort-based 
shuffle.
    +   */
    +  def canUseUnsafeShuffle[K, V, C](dependency: ShuffleDependency[K, V, 
C]): Boolean = {
    +    val shufId = dependency.shuffleId
    +    val serializer = Serializer.getSerializer(dependency.serializer)
    +    if (!serializer.supportsRelocationOfSerializedObjects) {
    +      log.debug(s"Can't use UnsafeShuffle for shuffle $shufId because the 
serializer, " +
    +        s"${serializer.getClass.getName}, does not support object 
relocation")
    +      false
    +    } else if (dependency.aggregator.isDefined) {
    +      log.debug(s"Can't use UnsafeShuffle for shuffle $shufId because an 
aggregator is defined")
    +      false
    +    } else if (dependency.partitioner.numPartitions > 
MAX_SHUFFLE_OUTPUT_PARTITIONS) {
    +      log.debug(s"Can't use UnsafeShuffle for shuffle $shufId because it 
has more than " +
    +        s"$MAX_SHUFFLE_OUTPUT_PARTITIONS partitions")
    +      false
    +    } else {
    +      log.debug(s"Can use UnsafeShuffle for shuffle $shufId")
    +      true
    +    }
    +  }
    +}
    +
    +/**
    + * A shuffle implementation that uses directly-managed memory to implement 
several performance
    --- End diff --
    
    TODO: update these comments.


---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes to enable it, or if the feature is enabled but not
working, please contact infrastructure at [email protected] or file
a JIRA ticket with INFRA.
---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to