Repository: spark
Updated Branches:
  refs/heads/master bbc247548 -> 0dc4310b4


[SPARK-16694][CORE] Use for/foreach rather than map for Unit expressions whose side effects are required

## What changes were proposed in this pull request?

Use foreach/for instead of map where the operation requires execution of the body for its side effects, rather than defining a transformation.
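
For illustration, a minimal sketch of the distinction (not taken from the patch; the values are hypothetical):

```scala
val lines = Seq("a", "b", "c")

// Anti-pattern: `map` runs the body only for its side effect and allocates
// a Seq[Unit] that is immediately discarded.
lines.map(println)

// Preferred: `foreach`, or a `for` loop without `yield`, states the intent.
lines.foreach(println)
for (line <- lines) println(line)

// On lazy collections the `map` form is actively wrong: nothing executes
// until the result is consumed, so the side effects may never happen.
Iterator(1, 2, 3).map(println)     // prints nothing
Iterator(1, 2, 3).foreach(println) // prints 1, 2, 3
```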

## How was this patch tested?

Jenkins

Author: Sean Owen <[email protected]>

Closes #14332 from srowen/SPARK-16694.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/0dc4310b
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/0dc4310b
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/0dc4310b

Branch: refs/heads/master
Commit: 0dc4310b470c7e4355c0da67ca3373c3013cc9dd
Parents: bbc2475
Author: Sean Owen <[email protected]>
Authored: Sat Jul 30 04:42:38 2016 -0700
Committer: Sean Owen <[email protected]>
Committed: Sat Jul 30 04:42:38 2016 -0700

----------------------------------------------------------------------
 .../org/apache/spark/api/r/RBackendHandler.scala      |  4 ++--
 .../main/scala/org/apache/spark/deploy/Client.scala   | 14 ++++++++------
 .../scala/org/apache/spark/rdd/CoalescedRDD.scala     | 14 +++++++-------
 .../main/scala/org/apache/spark/rdd/HadoopRDD.scala   |  4 ++--
 .../mesos/MesosCoarseGrainedSchedulerBackend.scala    |  4 +---
 .../org/apache/spark/ImplicitOrderingSuite.scala      |  4 ++--
 .../scala/org/apache/spark/PartitioningSuite.scala    |  6 +++---
 .../scala/org/apache/spark/rdd/PipedRDDSuite.scala    |  4 ++--
 .../apache/spark/util/TimeStampedHashMapSuite.scala   |  4 ++--
 .../apache/spark/examples/ml/DataFrameExample.scala   |  9 ++++-----
 .../spark/examples/ml/DecisionTreeExample.scala       |  9 ++++-----
 .../org/apache/spark/examples/ml/GBTExample.scala     |  9 ++++-----
 .../spark/examples/ml/LinearRegressionExample.scala   |  9 ++++-----
 .../spark/examples/ml/LogisticRegressionExample.scala |  9 ++++-----
 .../spark/examples/ml/RandomForestExample.scala       |  9 ++++-----
 .../spark/examples/mllib/BinaryClassification.scala   |  9 ++++-----
 .../apache/spark/examples/mllib/Correlations.scala    |  9 ++++-----
 .../spark/examples/mllib/CosineSimilarity.scala       |  9 ++++-----
 .../spark/examples/mllib/DecisionTreeRunner.scala     |  9 ++++-----
 .../org/apache/spark/examples/mllib/DenseKMeans.scala |  9 ++++-----
 .../apache/spark/examples/mllib/FPGrowthExample.scala |  9 ++++-----
 .../examples/mllib/GradientBoostedTreesRunner.scala   |  9 ++++-----
 .../org/apache/spark/examples/mllib/LDAExample.scala  | 10 ++++------
 .../spark/examples/mllib/LinearRegression.scala       |  9 ++++-----
 .../apache/spark/examples/mllib/MovieLensALS.scala    |  9 ++++-----
 .../spark/examples/mllib/MultivariateSummarizer.scala |  9 ++++-----
 .../mllib/PowerIterationClusteringExample.scala       |  9 ++++-----
 .../org/apache/spark/examples/mllib/SampledRDDs.scala |  9 ++++-----
 .../spark/examples/mllib/SparseNaiveBayes.scala       |  9 ++++-----
 .../scala/org/apache/spark/graphx/GraphSuite.scala    |  2 +-
 .../org/apache/spark/mllib/clustering/KMeans.scala    |  2 +-
 .../org/apache/spark/ml/feature/Word2VecSuite.scala   |  4 ++--
 .../spark/mllib/classification/NaiveBayesSuite.scala  |  2 +-
 .../spark/mllib/random/RandomDataGeneratorSuite.scala |  6 +++---
 .../sql/catalyst/expressions/PredicateSuite.scala     |  4 ++--
 .../compression/CompressionSchemeBenchmark.scala      |  4 ++--
 .../columnar/compression/IntegralDeltaSuite.scala     |  2 +-
 .../datasources/FileSourceStrategySuite.scala         |  4 ++--
 .../spark/streaming/ReceiverInputDStreamSuite.scala   |  2 +-
 39 files changed, 125 insertions(+), 146 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
index c416e83..7d53482 100644
--- a/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
+++ b/core/src/main/scala/org/apache/spark/api/r/RBackendHandler.scala
@@ -198,7 +198,7 @@ private[r] class RBackendHandler(server: RBackend)
       args: Array[Object]): Option[Int] = {
     val numArgs = args.length
 
-    for (index <- 0 until parameterTypesOfMethods.length) {
+    for (index <- parameterTypesOfMethods.indices) {
       val parameterTypes = parameterTypesOfMethods(index)
 
       if (parameterTypes.length == numArgs) {
@@ -240,7 +240,7 @@ private[r] class RBackendHandler(server: RBackend)
           // Convert args if needed
           val parameterTypes = parameterTypesOfMethods(index)
 
-          (0 until numArgs).map { i =>
+          for (i <- 0 until numArgs) {
            if (parameterTypes(i) == classOf[Seq[Any]] && args(i).getClass.isArray) {
               // Convert a Java array to scala Seq
               args(i) = args(i).asInstanceOf[Array[_]].toSeq

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/main/scala/org/apache/spark/deploy/Client.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/deploy/Client.scala b/core/src/main/scala/org/apache/spark/deploy/Client.scala
index 640f25f..bf2dab6 100644
--- a/core/src/main/scala/org/apache/spark/deploy/Client.scala
+++ b/core/src/main/scala/org/apache/spark/deploy/Client.scala
@@ -116,7 +116,7 @@ private class ClientEndpoint(
   }
 
   /* Find out driver status then exit the JVM */
-  def pollAndReportStatus(driverId: String) {
+  def pollAndReportStatus(driverId: String): Unit = {
    // Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
     // is fine.
     logInfo("... waiting before polling master for driver state")
@@ -137,12 +137,14 @@ private class ClientEndpoint(
           case _ =>
         }
         // Exception, if present
-        statusResponse.exception.map { e =>
-          logError(s"Exception from cluster was: $e")
-          e.printStackTrace()
-          System.exit(-1)
+        statusResponse.exception match {
+          case Some(e) =>
+            logError(s"Exception from cluster was: $e")
+            e.printStackTrace()
+            System.exit(-1)
+          case _ =>
+            System.exit(0)
         }
-        System.exit(0)
     }
   }
 

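As an aside, this is the one hunk where `map` becomes a `match` rather than `foreach`: the old code relied on `System.exit(-1)` never returning to make the trailing `System.exit(0)` conditional. A hedged sketch of the shape, using a hypothetical `report` helper:

```scala
def report(maybeError: Option[Exception]): Unit = {
  // Before (sketch): `map` ran only for its side effect; the unconditional
  // exit(0) after it was reachable only because sys.exit(-1) never returns.
  //   maybeError.map { e => e.printStackTrace(); sys.exit(-1) }
  //   sys.exit(0)

  // After: both exit paths are explicit branches of a single match.
  maybeError match {
    case Some(e) =>
      e.printStackTrace()
      sys.exit(-1)
    case None =>
      sys.exit(0)
  }
}
```
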
http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
index 2ec9846..9c198a6 100644
--- a/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/CoalescedRDD.scala
@@ -183,14 +183,14 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
 
     getAllPrefLocs(prev)
 
-    // gets all the preffered locations of the previous RDD and splits them into partitions
+    // gets all the preferred locations of the previous RDD and splits them into partitions
     // with preferred locations and ones without
-    def getAllPrefLocs(prev: RDD[_]) {
+    def getAllPrefLocs(prev: RDD[_]): Unit = {
       val tmpPartsWithLocs = mutable.LinkedHashMap[Partition, Seq[String]]()
      // first get the locations for each partition, only do this once since it can be expensive
       prev.partitions.foreach(p => {
           val locs = currPrefLocs(p, prev)
-          if (locs.size > 0) {
+          if (locs.nonEmpty) {
             tmpPartsWithLocs.put(p, locs)
           } else {
             partsWithoutLocs += p
@@ -198,13 +198,13 @@ private class DefaultPartitionCoalescer(val balanceSlack: Double = 0.10)
         }
       )
       // convert it into an array of host to partition
-      (0 to 2).map(x =>
-        tmpPartsWithLocs.foreach(parts => {
+      for (x <- 0 to 2) {
+        tmpPartsWithLocs.foreach { parts =>
           val p = parts._1
           val locs = parts._2
           if (locs.size > x) partsWithLocs += ((locs(x), p))
-        } )
-      )
+        }
+      }
     }
   }
 

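A note on the `(0 to 2).map(...)` hunk above: `Range.map` is strict, so the body did execute, but it built and discarded a result collection on every call. A small illustrative sketch (not from the patch):

```scala
// `for` without `yield` desugars to `foreach`, so the rewritten loop
// allocates no result collection:
for (x <- 0 to 2) println(x) // desugars to (0 to 2).foreach(x => println(x))

// The old form also ran the body, but materialized an IndexedSeq[Unit]:
(0 to 2).map(x => println(x))
```
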
http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
index 515fd6f..99afe02 100644
--- a/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
+++ b/core/src/main/scala/org/apache/spark/rdd/HadoopRDD.scala
@@ -155,7 +155,7 @@ class HadoopRDD[K, V](
         logDebug("Cloning Hadoop Configuration")
         val newJobConf = new JobConf(conf)
         if (!conf.isInstanceOf[JobConf]) {
-          initLocalJobConfFuncOpt.map(f => f(newJobConf))
+          initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
         }
         newJobConf
       }
@@ -174,7 +174,7 @@ class HadoopRDD[K, V](
         HadoopRDD.CONFIGURATION_INSTANTIATION_LOCK.synchronized {
           logDebug("Creating new JobConf and caching it for later re-use")
           val newJobConf = new JobConf(conf)
-          initLocalJobConfFuncOpt.map(f => f(newJobConf))
+          initLocalJobConfFuncOpt.foreach(f => f(newJobConf))
           HadoopRDD.putCachedMetadata(jobConfCacheKey, newJobConf)
           newJobConf
         }

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
----------------------------------------------------------------------
diff --git a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
index 959d6fd..263e619 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosCoarseGrainedSchedulerBackend.scala
@@ -220,9 +220,7 @@ private[spark] class MesosCoarseGrainedSchedulerBackend(
       command.addUris(CommandInfo.URI.newBuilder().setValue(uri.get))
     }
 
-    conf.getOption("spark.mesos.uris").map { uris =>
-      setupUris(uris, command)
-    }
+    conf.getOption("spark.mesos.uris").foreach(setupUris(_, command))
 
     command.build()
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
index 939f12f..b9d1811 100644
--- a/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/ImplicitOrderingSuite.scala
@@ -30,11 +30,11 @@ class ImplicitOrderingSuite extends SparkFunSuite with LocalSparkContext {
 
     // Infer orderings after basic maps to particular types
     val basicMapExpectations = ImplicitOrderingSuite.basicMapExpectations(rdd)
-    basicMapExpectations.map({case (met, explain) => assert(met, explain)})
+    basicMapExpectations.foreach { case (met, explain) => assert(met, explain) }
 
     // Infer orderings for other RDD methods
     val otherRDDMethodExpectations = ImplicitOrderingSuite.otherRDDMethodExpectations(rdd)
-    otherRDDMethodExpectations.map({case (met, explain) => assert(met, explain)})
+    otherRDDMethodExpectations.foreach { case (met, explain) => assert(met, explain) }
   }
 }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
index c5d4968..34c0178 100644
--- a/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
+++ b/core/src/test/scala/org/apache/spark/PartitioningSuite.scala
@@ -71,9 +71,9 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
     val partitionSizes = List(1, 2, 10, 100, 500, 1000, 1500)
    val partitioners = partitionSizes.map(p => (p, new RangePartitioner(p, rdd)))
     val decoratedRangeBounds = PrivateMethod[Array[Int]]('rangeBounds)
-    partitioners.map { case (numPartitions, partitioner) =>
+    partitioners.foreach { case (numPartitions, partitioner) =>
       val rangeBounds = partitioner.invokePrivate(decoratedRangeBounds())
-      1.to(1000).map { element => {
+      for (element <- 1 to 1000) {
         val partition = partitioner.getPartition(element)
         if (numPartitions > 1) {
           if (partition < rangeBounds.size) {
@@ -85,7 +85,7 @@ class PartitioningSuite extends SparkFunSuite with SharedSparkContext with Priva
         } else {
           assert(partition === 0)
         }
-      }}
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
index f8d523f..59b9097 100644
--- a/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
+++ b/core/src/test/scala/org/apache/spark/rdd/PipedRDDSuite.scala
@@ -96,7 +96,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
       val piped = nums.pipe(Seq("cat"),
         Map[String, String](),
         (f: String => Unit) => {
-          bl.value.map(f(_)); f("\u0001")
+          bl.value.foreach(f); f("\u0001")
         },
         (i: Int, f: String => Unit) => f(i + "_"))
 
@@ -117,7 +117,7 @@ class PipedRDDSuite extends SparkFunSuite with SharedSparkContext {
         pipe(Seq("cat"),
           Map[String, String](),
           (f: String => Unit) => {
-            bl.value.map(f(_)); f("\u0001")
+            bl.value.foreach(f); f("\u0001")
           },
           (i: Tuple2[String, Iterable[String]], f: String => Unit) => {
             for (e <- i._2) {

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
----------------------------------------------------------------------
diff --git a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
index 25fc15d..fd9add7 100644
--- a/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/TimeStampedHashMapSuite.scala
@@ -171,8 +171,8 @@ class TimeStampedHashMapSuite extends SparkFunSuite {
     })
 
     test(name + " - threading safety test")  {
-      threads.map(_.start)
-      threads.map(_.join)
+      threads.foreach(_.start())
+      threads.foreach(_.join())
       assert(!error)
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
index 38c1c1c..e07c9a4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DataFrameExample.scala
@@ -54,14 +54,13 @@ object DataFrameExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"DataFrameExample with $params")

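The same reshaping repeats in each example `main` below: scopt's parse result is an Option, and `.map { params => run(params) }.getOrElse { sys.exit(1) }` used `map` purely for its side effect. A hedged sketch of the preferred shape, with a hypothetical `parse` standing in for scopt's parser:

```scala
object ExampleApp {
  case class Config(input: String = "")

  // Stand-in for scopt's parser.parse; note that `map` here is a genuine
  // transformation (Option[String] => Option[Config]), which is fine.
  def parse(args: Array[String]): Option[Config] =
    args.headOption.map(Config(_))

  def main(args: Array[String]): Unit = {
    parse(args) match {
      case Some(config) => println(s"running with ${config.input}")
      case _ => sys.exit(1)
    }
  }
}
```
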
http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
index de44745..1745281 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala
@@ -124,10 +124,9 @@ object DecisionTreeExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
@@ -197,7 +196,7 @@ object DecisionTreeExample {
     (training, test)
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"DecisionTreeExample with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
index a4274ae..db55298 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/GBTExample.scala
@@ -127,14 +127,13 @@ object GBTExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"GBTExample with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
index de96fb2..31ba180 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LinearRegressionExample.scala
@@ -96,14 +96,13 @@ object LinearRegressionExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"LinearRegressionExample with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
index c2a87e1..c67b538 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/LogisticRegressionExample.scala
@@ -103,14 +103,13 @@ object LogisticRegressionExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"LogisticRegressionExample with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
index 2419dc4..a9e07c0 100644
--- a/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/ml/RandomForestExample.scala
@@ -133,14 +133,13 @@ object RandomForestExample {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val spark = SparkSession
       .builder
       .appName(s"RandomForestExample with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
index 2282bd2..a1a5b59 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/BinaryClassification.scala
@@ -95,14 +95,13 @@ object BinaryClassification {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"BinaryClassification with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
index e003f35..0b44c33 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/Correlations.scala
@@ -56,14 +56,13 @@ object Correlations {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-        sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"Correlations with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
index 5ff3d36..681465d 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/CosineSimilarity.scala
@@ -68,14 +68,13 @@ object CosineSimilarity {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      System.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName("CosineSimilarity")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
index a85aa2c..0ad0465 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala
@@ -149,10 +149,9 @@ object DecisionTreeRunner {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
@@ -253,7 +252,7 @@ object DecisionTreeRunner {
     (training, test, numClasses)
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
 
     val conf = new SparkConf().setAppName(s"DecisionTreeRunner with $params")
     val sc = new SparkContext(conf)

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
index 380d85d..b228827 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/DenseKMeans.scala
@@ -69,14 +69,13 @@ object DenseKMeans {
         .action((x, c) => c.copy(input = x))
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"DenseKMeans with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
index a7a3ead..6435abc 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/FPGrowthExample.scala
@@ -53,14 +53,13 @@ object FPGrowthExample {
         .action((x, c) => c.copy(input = x))
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"FPGrowthExample with $params")
     val sc = new SparkContext(conf)
     val transactions = sc.textFile(params.input).map(_.split(" ")).cache()

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
index 90e4687..4020c6b 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/GradientBoostedTreesRunner.scala
@@ -85,14 +85,13 @@ object GradientBoostedTreesRunner {
       }
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
 
    val conf = new SparkConf().setAppName(s"GradientBoostedTreesRunner with $params")
     val sc = new SparkContext(conf)

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
index 3fbf8e0..7e50b12 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LDAExample.scala
@@ -98,15 +98,13 @@ object LDAExample {
         .action((x, c) => c.copy(input = c.input :+ x))
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      parser.showUsageAsError
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  private def run(params: Params) {
+  private def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"LDAExample with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
index a702030..86aec36 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/LinearRegression.scala
@@ -82,14 +82,13 @@ object LinearRegression {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"LinearRegression with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
index 09750e5..9bd6927 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MovieLensALS.scala
@@ -89,14 +89,13 @@ object MovieLensALS {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      System.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"MovieLensALS with $params")
     if (params.kryo) {
       conf.registerKryoClasses(Array(classOf[mutable.BitSet], classOf[Rating]))

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
index 3c59817..f9e47e4 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/MultivariateSummarizer.scala
@@ -57,14 +57,13 @@ object MultivariateSummarizer {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-        sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
    val conf = new SparkConf().setAppName(s"MultivariateSummarizer with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
index a81c9b3..986496c 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/PowerIterationClusteringExample.scala
@@ -77,14 +77,13 @@ object PowerIterationClusteringExample {
         .action((x, c) => c.copy(maxIterations = x))
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf()
       .setMaster("local")
       .setAppName(s"PowerIterationClustering with $params")

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
index 0da4005..ba3deae 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SampledRDDs.scala
@@ -52,14 +52,13 @@ object SampledRDDs {
         """.stripMargin)
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    } getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"SampledRDDs with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
index f81fc29..b76add2 100644
--- a/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SparseNaiveBayes.scala
@@ -60,14 +60,13 @@ object SparseNaiveBayes {
         .action((x, c) => c.copy(input = x))
     }
 
-    parser.parse(args, defaultParams).map { params =>
-      run(params)
-    }.getOrElse {
-      sys.exit(1)
+    parser.parse(args, defaultParams) match {
+      case Some(params) => run(params)
+      case _ => sys.exit(1)
     }
   }
 
-  def run(params: Params) {
+  def run(params: Params): Unit = {
     val conf = new SparkConf().setAppName(s"SparseNaiveBayes with $params")
     val sc = new SparkContext(conf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
----------------------------------------------------------------------
diff --git a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
index 96aa262..88b59a3 100644
--- a/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
+++ b/graphx/src/test/scala/org/apache/spark/graphx/GraphSuite.scala
@@ -62,7 +62,7 @@ class GraphSuite extends SparkFunSuite with LocalSparkContext {
       assert( graph.edges.count() === rawEdges.size )
      // Vertices not explicitly provided but referenced by edges should be created automatically
       assert( graph.vertices.count() === 100)
-      graph.triplets.collect().map { et =>
+      graph.triplets.collect().foreach { et =>
        assert((et.srcId < 10 && et.srcAttr) || (et.srcId >= 10 && !et.srcAttr))
        assert((et.dstId < 10 && et.dstAttr) || (et.dstId >= 10 && !et.dstAttr))
       }

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
----------------------------------------------------------------------
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
index 871b1c7..9a3d64f 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala
@@ -268,7 +268,7 @@ class KMeans private (
 
     val iterationStartTime = System.nanoTime()
 
-    instr.map(_.logNumFeatures(centers(0)(0).vector.size))
+    instr.foreach(_.logNumFeatures(centers(0)(0).vector.size))
 
     // Execute iterations of Lloyd's algorithm until all runs have converged
     while (iteration < maxIterations && !activeRuns.isEmpty) {

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
index 16c74f6..0b441f8 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/Word2VecSuite.scala
@@ -138,8 +138,8 @@ class Word2VecSuite extends SparkFunSuite with MLlibTestSparkContext with Defaul
       case Row(w: String, sim: Double) => (w, sim)
     }.collect().unzip
 
-    assert(synonyms.toArray === Array("b", "c"))
-    expectedSimilarity.zip(similarity).map {
+    assert(synonyms === Array("b", "c"))
+    expectedSimilarity.zip(similarity).foreach {
      case (expected, actual) => assert(math.abs((expected - actual) / expected) < 1E-5)
     }
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
index 0c0aefc..5ec4c15 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala
@@ -307,7 +307,7 @@ class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
     val tempDir = Utils.createTempDir()
     val path = tempDir.toURI.toString
 
-    Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).map {
+    Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).foreach {
       model =>
         // Save model, load it back, and compare.
         try {

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
----------------------------------------------------------------------
diff --git a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
index 8416771..e30ad15 100644
--- a/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/mllib/random/RandomDataGeneratorSuite.scala
@@ -80,7 +80,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {
   }
 
   test("LogNormalGenerator") {
-    List((0.0, 1.0), (0.0, 2.0), (2.0, 1.0), (2.0, 2.0)).map {
+    List((0.0, 1.0), (0.0, 2.0), (2.0, 1.0), (2.0, 2.0)).foreach {
       case (mean: Double, vari: Double) =>
         val normal = new LogNormalGenerator(mean, math.sqrt(vari))
         apiChecks(normal)
@@ -125,7 +125,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {
 
   test("GammaGenerator") {
    // mean = 0.0 will not pass the API checks since 0.0 is always deterministically produced.
-    List((1.0, 2.0), (2.0, 2.0), (3.0, 2.0), (5.0, 1.0), (9.0, 0.5)).map {
+    List((1.0, 2.0), (2.0, 2.0), (3.0, 2.0), (5.0, 1.0), (9.0, 0.5)).foreach {
       case (shape: Double, scale: Double) =>
         val gamma = new GammaGenerator(shape, scale)
         apiChecks(gamma)
@@ -138,7 +138,7 @@ class RandomDataGeneratorSuite extends SparkFunSuite {
   }
 
   test("WeibullGenerator") {
-    List((1.0, 2.0), (2.0, 3.0), (2.5, 3.5), (10.4, 2.222)).map {
+    List((1.0, 2.0), (2.0, 3.0), (2.5, 3.5), (10.4, 2.222)).foreach {
       case (alpha: Double, beta: Double) =>
         val weibull = new WeibullGenerator(alpha, beta)
         apiChecks(weibull)

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
index b3f2069..2a445b8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/PredicateSuite.scala
@@ -141,7 +141,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
 
    val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
      LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
-    primitiveTypes.map { t =>
+    primitiveTypes.foreach { t =>
       val dataGen = RandomDataGenerator.forType(t, nullable = true).get
       val inputData = Seq.fill(10) {
         val value = dataGen.apply()
@@ -182,7 +182,7 @@ class PredicateSuite extends SparkFunSuite with ExpressionEvalHelper {
 
    val primitiveTypes = Seq(IntegerType, FloatType, DoubleType, StringType, ByteType, ShortType,
      LongType, BinaryType, BooleanType, DecimalType.USER_DEFAULT, TimestampType)
-    primitiveTypes.map { t =>
+    primitiveTypes.foreach { t =>
       val dataGen = RandomDataGenerator.forType(t, nullable = true).get
       val inputData = Seq.fill(10) {
         val value = dataGen.apply()

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
index 1aadd70..babf944 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/CompressionSchemeBenchmark.scala
@@ -79,7 +79,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
       input: ByteBuffer): Unit = {
     val benchmark = new Benchmark(name, iters * count)
 
-    schemes.filter(_.supports(tpe)).map { scheme =>
+    schemes.filter(_.supports(tpe)).foreach { scheme =>
      val (compressFunc, compressionRatio, buf) = prepareEncodeInternal(count, tpe, scheme, input)
      val label = s"${getFormattedClassName(scheme)}(${compressionRatio.formatted("%.3f")})"
 
@@ -103,7 +103,7 @@ object CompressionSchemeBenchmark extends AllCompressionSchemes {
       input: ByteBuffer): Unit = {
     val benchmark = new Benchmark(name, iters * count)
 
-    schemes.filter(_.supports(tpe)).map { scheme =>
+    schemes.filter(_.supports(tpe)).foreach { scheme =>
      val (compressFunc, _, buf) = prepareEncodeInternal(count, tpe, scheme, input)
       val compressedBuf = compressFunc(input, buf)
       val label = s"${getFormattedClassName(scheme)}"

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
index 988a577..a530e27 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
@@ -47,7 +47,7 @@ class IntegralDeltaSuite extends SparkFunSuite {
         }
       }
 
-      input.map { value =>
+      input.foreach { value =>
         val row = new GenericMutableRow(1)
         columnType.setField(row, 0, value)
         builder.appendFrom(row, 0)

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index ddcc24a..2f551b1 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -343,7 +343,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi
 
   test("SPARK-15654 do not split non-splittable files") {
     // Check if a non-splittable file is not assigned into partitions
-    Seq("gz", "snappy", "lz4").map { suffix =>
+    Seq("gz", "snappy", "lz4").foreach { suffix =>
        val table = createTable(
        files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
       )
@@ -359,7 +359,7 @@ class FileSourceStrategySuite extends QueryTest with SharedSQLContext with Predi
     }
 
    // Check if a splittable compressed file is assigned into multiple partitions
-    Seq("bz2").map { suffix =>
+    Seq("bz2").foreach { suffix =>
        val table = createTable(
         files = Seq(s"file1.${suffix}" -> 3, s"file2.${suffix}" -> 1, s"file3.${suffix}" -> 1)
       )

http://git-wip-us.apache.org/repos/asf/spark/blob/0dc4310b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
----------------------------------------------------------------------
diff --git a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
index 6763ac6..0349e11 100644
--- a/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
+++ b/streaming/src/test/scala/org/apache/spark/streaming/ReceiverInputDStreamSuite.scala
@@ -34,7 +34,7 @@ class ReceiverInputDStreamSuite extends TestSuiteBase with BeforeAndAfterAll {
 
   override def afterAll(): Unit = {
     try {
-      StreamingContext.getActive().map { _.stop() }
+      StreamingContext.getActive().foreach(_.stop())
     } finally {
       super.afterAll()
     }

