Style fixes
Project: http://git-wip-us.apache.org/repos/asf/incubator-spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-spark/commit/36966f65
Tree: http://git-wip-us.apache.org/repos/asf/incubator-spark/tree/36966f65
Diff: http://git-wip-us.apache.org/repos/asf/incubator-spark/diff/36966f65

Branch: refs/heads/scala-2.10
Commit: 36966f65df2947a78d32d731f6b004d015ff011e
Parents: 3f7e9b2
Author: Kay Ousterhout <kayousterh...@gmail.com>
Authored: Wed Oct 9 15:36:34 2013 -0700
Committer: Kay Ousterhout <kayousterh...@gmail.com>
Committed: Wed Oct 9 15:36:34 2013 -0700

----------------------------------------------------------------------
 .../spark/scheduler/SparkListenerSuite.scala | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/incubator-spark/blob/36966f65/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala b/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala
index 6e80262..0d8742c 100644
--- a/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala
+++ b/core/src/test/scala/org/apache/spark/scheduler/SparkListenerSuite.scala
@@ -54,16 +54,23 @@ class SparkListenerSuite extends FunSuite with LocalSparkContext with ShouldMatc
     assert(sc.dagScheduler.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS))

     listener.stageInfos.size should be (4)
-    listener.stageInfos.foreach {stageInfo =>
-      //small test, so some tasks might take less than 1 millisecond, but average should be greater than 1 ms
+    listener.stageInfos.foreach { stageInfo =>
+      /* small test, so some tasks might take less than 1 millisecond, but average should be greater
+       * than 0 ms. */
       checkNonZeroAvg(stageInfo.taskInfos.map{_._1.duration}, stageInfo + " duration")
-      checkNonZeroAvg(stageInfo.taskInfos.map{_._2.executorRunTime.toLong}, stageInfo + " executorRunTime")
-      checkNonZeroAvg(stageInfo.taskInfos.map{_._2.executorDeserializeTime.toLong}, stageInfo + " executorDeserializeTime")
+      checkNonZeroAvg(
+        stageInfo.taskInfos.map{_._2.executorRunTime.toLong},
+        stageInfo + " executorRunTime")
+      checkNonZeroAvg(
+        stageInfo.taskInfos.map{_._2.executorDeserializeTime.toLong},
+        stageInfo + " executorDeserializeTime")
       if (stageInfo.stage.rdd.name == d4.name) {
-        checkNonZeroAvg(stageInfo.taskInfos.map{_._2.shuffleReadMetrics.get.fetchWaitTime}, stageInfo + " fetchWaitTime")
+        checkNonZeroAvg(
+          stageInfo.taskInfos.map{_._2.shuffleReadMetrics.get.fetchWaitTime},
+          stageInfo + " fetchWaitTime")
       }

-      stageInfo.taskInfos.foreach{case (taskInfo, taskMetrics) =>
+      stageInfo.taskInfos.foreach { case (taskInfo, taskMetrics) =>
         taskMetrics.resultSize should be > (0l)
         if (isStage(stageInfo, Set(d2.name, d3.name), Set(d4.name))) {
           taskMetrics.shuffleWriteMetrics should be ('defined)