Github user andrewor14 commented on a diff in the pull request: https://github.com/apache/spark/pull/147#discussion_r10635962 --- Diff: core/src/test/scala/org/apache/spark/MapOutputTrackerSuite.scala --- @@ -136,4 +123,47 @@ class MapOutputTrackerSuite extends FunSuite with LocalSparkContext { // failure should be cached intercept[FetchFailedException] { slaveTracker.getServerStatuses(10, 0) } } + + test("remote fetch exceeding akka frame size") { + val newConf = new SparkConf + newConf.set("spark.akka.frameSize", "1") + newConf.set("spark.akka.askTimeout", "1") // Fail fast + val (masterTracker, slaveTracker) = setUpMasterSlaveSystem(newConf) + + // Frame size should be ~123B, and no exception should be thrown + masterTracker.registerShuffle(10, 1) + masterTracker.registerMapOutput(10, 0, new MapStatus( + BlockManagerId("88", "mph", 1000, 0), Array.fill[Byte](10)(0))) + slaveTracker.getServerStatuses(10, 0) + + // Frame size should be ~1.1MB, and MapOutputTrackerMasterActor should throw exception + masterTracker.registerShuffle(20, 100) + (0 until 100).foreach { i => + masterTracker.registerMapOutput(20, i, new MapStatus( + BlockManagerId("999", "mps", 1000, 0), Array.fill[Byte](4000000)(0))) + } + intercept[SparkException] { slaveTracker.getServerStatuses(20, 0) } + } + + private def setUpMasterSlaveSystem(conf: SparkConf) = { --- End diff -- Note that the test before this one also sets up an entire actor system but finishes in under a second. This test is ~4 seconds long because we have to wait for the akka timeouts before an exception is thrown. In any case, I used the akka test framework you suggested and that considerably simplified the test. (Note that the duration is still the same, however)
--- If your project is set up for it, you can reply to this email and have your reply appear on GitHub as well. If your project does not have this feature enabled and wishes so, or if the feature is enabled but not working, please contact infrastructure at infrastruct...@apache.org or file a JIRA ticket with INFRA. ---