Github user jose-torres commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21293#discussion_r187367699

    --- Diff: core/src/main/scala/org/apache/spark/MapOutputTracker.scala ---
    @@ -233,6 +239,28 @@ private[spark] class MapOutputTrackerMasterEndpoint(
           logInfo("MapOutputTrackerMasterEndpoint stopped!")
           context.reply(true)
           stop()
    +
    +    case CheckNoMissingPartitions(shuffleId: Int) =>
    +      logInfo(s"Checking missing partitions for $shuffleId")
    +      // If findMissingPartitions returns None, fall back to a non-empty Seq
    +      val missing = tracker.findMissingPartitions(shuffleId).getOrElse(Seq(0))
    +      if (missing.isEmpty) {
    +        context.reply(true)
    +      } else {
    +        context.reply(false)
    +      }
    +
    +    case CheckAndRegisterShuffle(shuffleId: Int, numMaps: Int) =>
    --- End diff --

    There's already a way to register shuffles. Why is this RPC needed?
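For context, a minimal sketch of the existing registration path the question refers to (an editorial illustration, not part of the PR; simplified from DAGScheduler.createShuffleMapStage as of this branch point, where the driver holds the MapOutputTrackerMaster directly):

    // The driver already registers a shuffle on the MapOutputTrackerMaster
    // when the corresponding shuffle map stage is created, with no RPC
    // round trip needed:
    if (!mapOutputTracker.containsShuffle(shuffleDep.shuffleId)) {
      // Register the shuffle with one output slot per map partition.
      mapOutputTracker.registerShuffle(shuffleDep.shuffleId, rdd.partitions.length)
    }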