Github user zsxwing commented on a diff in the pull request:
https://github.com/apache/spark/pull/19763#discussion_r151801786
--- Diff: core/src/main/scala/org/apache/spark/MapOutputTracker.scala ---
@@ -472,17 +474,36 @@ private[spark] class MapOutputTrackerMaster(
shuffleStatuses.get(shuffleId).map(_.findMissingPartitions())
}
+ /**
+ * Try to equally divide Range(0, num) to divisor slices
+ */
+ def equallyDivide(num: Int, divisor: Int): Iterator[Seq[Int]] = {
+ assert(divisor > 0, "Divisor should be positive")
+ val (each, remain) = (num / divisor, num % divisor)
+ val (smaller, bigger) = (0 until num).splitAt((divisor-remain) * each)
+ if (each != 0) {
+ smaller.grouped(each) ++ bigger.grouped(each + 1)
+ } else {
+ bigger.grouped(each + 1)
+ }
+ }
+
/**
* Return statistics about all of the outputs for a given shuffle.
*/
def getStatistics(dep: ShuffleDependency[_, _, _]): MapOutputStatistics = {
shuffleStatuses(dep.shuffleId).withMapStatuses { statuses =>
val totalSizes = new Array[Long](dep.partitioner.numPartitions)
- for (s <- statuses) {
- for (i <- 0 until totalSizes.length) {
- totalSizes(i) += s.getSizeForBlock(i)
+ val parallelism = conf.getInt("spark.adaptive.map.statistics.cores", 8)
+
+ val mapStatusSubmitTasks = equallyDivide(totalSizes.length, parallelism).map {
--- End diff --
Doing this is not cheap. I would add a config and only run this in multiple
threads when `#mapper * #shuffle_partitions` is large.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]