This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-2.4
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-2.4 by this push:
     new 63e93a5  [SPARK-34059][SQL][CORE][2.4] Use for/foreach rather than map to make sure execute it eagerly
63e93a5 is described below

commit 63e93a5c38a83669ccc58a5b45d5cff0b296fcc9
Author: HyukjinKwon <gurwls...@apache.org>
AuthorDate: Tue Jan 12 13:03:12 2021 +0900

    [SPARK-34059][SQL][CORE][2.4] Use for/foreach rather than map to make sure execute it eagerly
    
    ### What changes were proposed in this pull request?
    
    This is a backport of https://github.com/apache/spark/pull/31110. I ran the IntelliJ inspection again in this branch.
    
    This PR is basically a follow-up of https://github.com/apache/spark/pull/14332.
    Calling `map` alone might leave the operation unexecuted due to lazy evaluation, for example:
    
    ```
    scala> val foo = Seq(1,2,3)
    foo: Seq[Int] = List(1, 2, 3)
    
    scala> foo.map(println)
    1
    2
    3
    res0: Seq[Unit] = List((), (), ())
    
    scala> foo.view.map(println)
    res1: scala.collection.SeqView[Unit,Seq[_]] = SeqViewM(...)
    
    scala> foo.view.foreach(println)
    1
    2
    3
    ```
    
    We should use `foreach` instead to make sure the side effect is executed eagerly when the output is unused or `Unit`, as in the short sketch below.
    
    ### Why are the changes needed?
    
    To prevent the potential issues by not executing `map`.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, the current code does not appear to cause any problems for now.
    
    ### How was this patch tested?
    
    I found these items by running the IntelliJ inspection, double-checked them one by one, and fixed them. Ideally, these should be all of the instances across the codebase.
    
    Closes #31139 from HyukjinKwon/SPARK-34059-2.4.
    
    Authored-by: HyukjinKwon <gurwls...@apache.org>
    Signed-off-by: HyukjinKwon <gurwls...@apache.org>
---
 core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala    | 2 +-
 .../test/scala/org/apache/spark/sql/avro/AvroLogicalTypeSuite.scala   | 2 +-
 .../spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala     | 2 +-
 .../spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala | 2 +-
 .../sql/execution/columnar/compression/PassThroughEncodingSuite.scala | 4 ++--
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala
index 2e59723..1864ad7 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRunner.scala
@@ -208,7 +208,7 @@ private[spark] abstract class BasePythonRunner[IN, OUT](
             /* backlog */ 1,
             InetAddress.getByName("localhost")))
           // A call to accept() for ServerSocket shall block infinitely.
-          serverSocket.map(_.setSoTimeout(0))
+          serverSocket.foreach(_.setSoTimeout(0))
           new Thread("accept-connections") {
             setDaemon(true)
 
diff --git a/external/avro/src/test/scala/org/apache/spark/sql/avro/AvroLogicalTypeSuite.scala b/external/avro/src/test/scala/org/apache/spark/sql/avro/AvroLogicalTypeSuite.scala
index 79ba287..26198e8 100644
--- a/external/avro/src/test/scala/org/apache/spark/sql/avro/AvroLogicalTypeSuite.scala
+++ b/external/avro/src/test/scala/org/apache/spark/sql/avro/AvroLogicalTypeSuite.scala
@@ -246,7 +246,7 @@ class AvroLogicalTypeSuite extends QueryTest with SharedSQLContext with SQLTestU
     dataFileWriter.create(schema, new File(avroFile))
     val logicalType = LogicalTypes.decimal(precision, scale)
 
-    decimalInputData.map { x =>
+    decimalInputData.foreach { x =>
       val avroRec = new GenericData.Record(schema)
       val decimal = new java.math.BigDecimal(x).setScale(scale)
       val bytes =
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
index def3325..b993148 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
@@ -137,7 +137,7 @@ private[mesos] object MesosSchedulerBackendUtil extends Logging {
     val containerInfo = ContainerInfo.newBuilder()
       .setType(containerType)
 
-    conf.getOption("spark.mesos.executor.docker.image").map { image =>
+    conf.getOption("spark.mesos.executor.docker.image").foreach { image =>
       val forcePullImage = conf
         .getOption("spark.mesos.executor.docker.forcePullImage")
         .exists(_.equals("true"))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
index 0d9f1fb..3ca7b1e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/IntegralDeltaSuite.scala
@@ -116,7 +116,7 @@ class IntegralDeltaSuite extends SparkFunSuite {
       val row = new GenericInternalRow(1)
       val nullRow = new GenericInternalRow(1)
       nullRow.setNullAt(0)
-      input.map { value =>
+      input.foreach { value =>
         if (value == nullValue) {
           builder.appendFrom(nullRow, 0)
         } else {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/PassThroughEncodingSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/PassThroughEncodingSuite.scala
index b6f0b5e..1715e7a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/PassThroughEncodingSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/columnar/compression/PassThroughEncodingSuite.scala
@@ -46,7 +46,7 @@ class PassThroughSuite extends SparkFunSuite {
 
       val builder = TestCompressibleColumnBuilder(columnStats, columnType, PassThrough)
 
-      input.map { value =>
+      input.foreach { value =>
         val row = new GenericInternalRow(1)
         columnType.setField(row, 0, value)
         builder.appendFrom(row, 0)
@@ -98,7 +98,7 @@ class PassThroughSuite extends SparkFunSuite {
       val row = new GenericInternalRow(1)
       val nullRow = new GenericInternalRow(1)
       nullRow.setNullAt(0)
-      input.map { value =>
+      input.foreach { value =>
         if (value == nullValue) {
           builder.appendFrom(nullRow, 0)
         } else {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
