This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 7de77e18c32 [MINOR][CONNECT][PYTHON] Miscellaneous cleanup in Spark Connect component
7de77e18c32 is described below

commit 7de77e18c329ae704d9512183f366bc17b5483e6
Author: Hyukjin Kwon <gurwls...@apache.org>
AuthorDate: Thu Oct 6 00:49:05 2022 -0700

    [MINOR][CONNECT][PYTHON] Miscellaneous cleanup in Spark Connect component
    
    ### What changes were proposed in this pull request?
    
    This PR proposes:
    1. Fix the code style in `SparkConnectPlugin.scala` and `SparkBuild.scala` to be consistent with the rest of the codebase.
    2. Rename `data_frame` to `dataframe` to be consistent with the existing PySpark code (see the sketch below).
    
    This PR is a followup of https://github.com/apache/spark/pull/37710.
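
    As a quick sketch of the user-visible effect of the rename (module names taken from this PR's diff; only the import path changes, the `DataFrame` class itself is untouched):

        # Before this PR, the Spark Connect DataFrame module was named `data_frame`:
        # from pyspark.sql.connect.data_frame import DataFrame

        # After this PR, the module name matches the existing PySpark layout:
        from pyspark.sql.connect.dataframe import DataFrame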
    
    ### Why are the changes needed?
    
    To follow the existing codebase and style.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, this code has not been released yet. The only notable change is renaming `data_frame` to `dataframe` for consistency.
    
    ### How was this patch tested?
    
    CI in this PR should validate the changes.
    
    Closes #38121 from HyukjinKwon/minor-cleanup.
    
    Authored-by: Hyukjin Kwon <gurwls...@apache.org>
    Signed-off-by: Dongjoon Hyun <dongj...@apache.org>
---
 .../spark/sql/connect/SparkConnectPlugin.scala       |  4 ++--
 project/SparkBuild.scala                             | 20 ++++++++++----------
 python/pyspark/sql/connect/__init__.py               |  2 +-
 python/pyspark/sql/connect/client.py                 |  2 +-
 .../sql/connect/{data_frame.py => dataframe.py}      |  0
 python/pyspark/sql/connect/readwriter.py             |  2 +-
 6 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala b/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
index d262947015c..7ac33fa9324 100644
--- a/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
+++ b/connector/connect/src/main/scala/org/apache/spark/sql/connect/SparkConnectPlugin.scala
@@ -44,8 +44,8 @@ class SparkConnectPlugin extends SparkPlugin {
   override def driverPlugin(): DriverPlugin = new DriverPlugin {
 
     override def init(
-                       sc: SparkContext,
-                       pluginContext: PluginContext): util.Map[String, String] = {
+        sc: SparkContext,
+        pluginContext: PluginContext): util.Map[String, String] = {
       SparkConnectService.start()
       Map.empty[String, String].asJava
     }
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 6ffc1d880c5..1de8bc6a47d 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -624,17 +624,17 @@ object SparkConnect {
    // For some reason the resolution from the imported Maven build does not work for some
    // of these dependendencies that we need to shade later on.
     libraryDependencies ++= Seq(
-      "io.grpc"             % "protoc-gen-grpc-java" % 
BuildCommons.gprcVersion asProtocPlugin(),
-      "org.scala-lang"      % "scala-library"        % "2.12.16" % "provided",
-      "com.google.guava"    % "guava"                % "31.0.1-jre",
-      "com.google.guava"    % "failureaccess"        % "1.0.1",
-      "com.google.protobuf" % "protobuf-java"        % protoVersion % 
"protobuf"
+      "io.grpc" % "protoc-gen-grpc-java" % BuildCommons.gprcVersion 
asProtocPlugin(),
+      "org.scala-lang" % "scala-library" % "2.12.16" % "provided",
+      "com.google.guava" % "guava" % "31.0.1-jre",
+      "com.google.guava" % "failureaccess" % "1.0.1",
+      "com.google.protobuf" % "protobuf-java" % protoVersion % "protobuf"
     ),
 
     dependencyOverrides ++= Seq(
-      "com.google.guava"    % "guava"                % "31.0.1-jre",
-      "com.google.guava"    % "failureaccess"        % "1.0.1",
-      "com.google.protobuf" % "protobuf-java"        % protoVersion
+      "com.google.guava" % "guava" % "31.0.1-jre",
+      "com.google.guava" % "failureaccess" % "1.0.1",
+      "com.google.protobuf" % "protobuf-java" % protoVersion
     ),
 
     (Compile / PB.targets) := Seq(
@@ -642,7 +642,7 @@ object SparkConnect {
       PB.gens.plugin("grpc-java") -> (Compile / sourceManaged).value
     ),
 
-    (assembly / test) := false,
+    (assembly / test) := { },
 
     (assembly / logLevel) := Level.Info,
 
@@ -1195,7 +1195,7 @@ object CopyDependencies {
       // For the SparkConnect build, we manually call the assembly target to
      // produce the shaded Jar which happens automatically in the case of Maven.
      // Later, when the dependencies are copied, we manually copy the shaded Jar only.
-      val fid = (LocalProject("connect")/assembly).value
+      val fid = (LocalProject("connect") / assembly).value
 
       (Compile / dependencyClasspath).value.map(_.data)
         .filter { jar => jar.isFile() }
diff --git a/python/pyspark/sql/connect/__init__.py b/python/pyspark/sql/connect/__init__.py
index c748f8f6590..3df96963f92 100644
--- a/python/pyspark/sql/connect/__init__.py
+++ b/python/pyspark/sql/connect/__init__.py
@@ -19,4 +19,4 @@
 Spark through this API are can be changed at any time without warning."""
 
 
-from pyspark.sql.connect.data_frame import DataFrame  # noqa: F401
+from pyspark.sql.connect.dataframe import DataFrame  # noqa: F401
diff --git a/python/pyspark/sql/connect/client.py b/python/pyspark/sql/connect/client.py
index 3aa335d35c0..cf825f88e85 100644
--- a/python/pyspark/sql/connect/client.py
+++ b/python/pyspark/sql/connect/client.py
@@ -30,7 +30,7 @@ import pyspark.sql.connect.proto as pb2
 import pyspark.sql.connect.proto.base_pb2_grpc as grpc_lib
 import pyspark.sql.types
 from pyspark import cloudpickle
-from pyspark.sql.connect.data_frame import DataFrame
+from pyspark.sql.connect.dataframe import DataFrame
 from pyspark.sql.connect.readwriter import DataFrameReader
 from pyspark.sql.connect.plan import SQL
 
diff --git a/python/pyspark/sql/connect/data_frame.py b/python/pyspark/sql/connect/dataframe.py
similarity index 100%
rename from python/pyspark/sql/connect/data_frame.py
rename to python/pyspark/sql/connect/dataframe.py
diff --git a/python/pyspark/sql/connect/readwriter.py b/python/pyspark/sql/connect/readwriter.py
index fb1e6da36f5..285e78e59ae 100644
--- a/python/pyspark/sql/connect/readwriter.py
+++ b/python/pyspark/sql/connect/readwriter.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 #
 
-from pyspark.sql.connect.data_frame import DataFrame
+from pyspark.sql.connect.dataframe import DataFrame
 from pyspark.sql.connect.plan import Read
 
 from typing import TYPE_CHECKING


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org