This is an automated email from the ASF dual-hosted git repository.
huaxingao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 00d099c8ce8 [MINOR] Whitespace must be between curly braces and the bodies of classes and methods
00d099c8ce8 is described below
commit 00d099c8ce8f0711a4c6057613bf90d9ece033b9
Author: panbingkun <[email protected]>
AuthorDate: Sat Jul 2 20:36:53 2022 -0700
[MINOR] Whitespace must be between curly braces and the bodies of classes and methods
### What changes were proposed in this pull request?
Make the code style general and consistent: a single space should separate a declaration or keyword from its opening curly brace.
### Why are the changes needed?
Whitespace must be between curly braces and the bodies of classes and methods; several declarations were missing the space before the opening brace.
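For illustration, here is a minimal before/after sketch of the rule (a hypothetical `Greeter` trait, not code from this patch):

```scala
// Before (the style this patch fixes): no space before the opening brace.
// trait Greeter{
//   def greet(name: String): String = synchronized{ s"Hello, $name" }
// }

// After: a single space separates the declaration (or keyword) from its brace.
trait Greeter {
  def greet(name: String): String = synchronized {
    s"Hello, $name"
  }
}
```

The same spacing applies before the brace of an anonymous class (`new TimerTask {`), a block argument (`withTempDir { dir =>`), and a mixed-in trait (`with Logging {`), as the hunks below show.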
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Passes GA (the existing GitHub Actions checks).
Closes #37057 from panbingkun/minor-code-style.
Authored-by: panbingkun <[email protected]>
Signed-off-by: huaxingao <[email protected]>
---
core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala | 2 +-
core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala | 2 +-
core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala | 2 +-
.../test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala | 2 +-
.../org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala | 6 +++---
.../apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala | 2 +-
.../apache/spark/ml/regression/GeneralizedLinearRegression.scala | 2 +-
.../org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala | 2 +-
.../src/test/scala/org/apache/spark/ml/feature/InstanceSuite.scala | 2 +-
.../org/apache/spark/ml/regression/RandomForestRegressorSuite.scala | 2 +-
.../org/apache/spark/ml/tuning/TrainValidationSplitSuite.scala | 2 +-
.../org/apache/spark/deploy/mesos/MesosClusterDispatcherSuite.scala | 2 +-
.../scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala | 4 ++--
.../apache/spark/sql/execution/datasources/PartitioningUtils.scala | 2 +-
.../sql/execution/datasources/v2/parquet/ParquetScanBuilder.scala | 2 +-
.../src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala | 2 +-
.../scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala | 2 +-
sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 2 +-
.../spark/sql/execution/datasources/FileFormatWriterSuite.scala | 2 +-
.../org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala | 2 +-
.../org/apache/spark/streaming/api/java/JavaStreamingListener.scala | 2 +-
21 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
index 33f2b18cb27..d11d2e7a4f6 100644
--- a/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala
@@ -697,7 +697,7 @@ private[spark] class PythonAccumulatorV2(
@transient private val serverHost: String,
private val serverPort: Int,
private val secretToken: String)
- extends CollectionAccumulator[Array[Byte]] with Logging{
+ extends CollectionAccumulator[Array[Byte]] with Logging {
Utils.checkHost(serverHost)
diff --git a/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala b/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala
index dd962ca11ec..a2a7fb5c100 100644
--- a/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala
+++ b/core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala
@@ -48,7 +48,7 @@ private[spark] object SerDeUtil extends Logging {
// This should be called before trying to unpickle array.array from Python
// In cluster mode, this should be put in closure
def initialize(): Unit = {
- synchronized{
+ synchronized {
if (!initialized) {
Unpickler.registerConstructor("__builtin__", "bytearray", new ByteArrayConstructor())
Unpickler.registerConstructor("builtins", "bytearray", new ByteArrayConstructor())
diff --git a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
index d3a061fae74..64a786e5825 100644
--- a/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
+++ b/core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala
@@ -47,7 +47,7 @@ private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
// Schedule a refresh thread to run periodically
private val timer = new Timer("refresh progress", true)
- timer.schedule(new TimerTask{
+ timer.schedule(new TimerTask {
override def run(): Unit = {
refresh()
}
diff --git a/core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala b/core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala
index 0d4be5b1d33..80dc4ff7586 100644
--- a/core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/MetricsSystemSuite.scala
@@ -30,7 +30,7 @@ import org.apache.spark.internal.config._
import org.apache.spark.metrics.sink.Sink
import org.apache.spark.metrics.source.{Source, StaticSources}
-class MetricsSystemSuite extends SparkFunSuite with BeforeAndAfter with PrivateMethodTester{
+class MetricsSystemSuite extends SparkFunSuite with BeforeAndAfter with PrivateMethodTester {
var filePath: String = _
var conf: SparkConf = null
var securityMgr: SecurityManager = null
diff --git a/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala b/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala
index 4e74036e113..571f57a6d6f 100644
--- a/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala
+++ b/core/src/test/scala/org/apache/spark/shuffle/HostLocalShuffleReadingSuite.scala
@@ -38,13 +38,13 @@ class HostLocalShuffleReadingSuite extends SparkFunSuite with Matchers with Loca
override def afterEach(): Unit = {
Option(rpcHandler).foreach { handler =>
- Utils.tryLogNonFatalError{
+ Utils.tryLogNonFatalError {
server.close()
}
- Utils.tryLogNonFatalError{
+ Utils.tryLogNonFatalError {
handler.close()
}
- Utils.tryLogNonFatalError{
+ Utils.tryLogNonFatalError {
transportContext.close()
}
server = null
diff --git a/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala b/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala
index 3f6cb2475af..4b63f1dacef 100644
--- a/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala
+++ b/core/src/test/scala/org/apache/spark/util/collection/ExternalAppendOnlyMapSuite.scala
@@ -34,7 +34,7 @@ import org.apache.spark.util.CompletionIterator
class ExternalAppendOnlyMapSuite extends SparkFunSuite
with LocalSparkContext
with Eventually
- with Matchers{
+ with Matchers {
import TestUtils.{assertNotSpilled, assertSpilled}
private val allCompressionCodecs = CompressionCodec.ALL_COMPRESSION_CODECS
diff --git a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
index 6d8507239eb..bbe6a9fce44 100644
--- a/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
+++ b/mllib/src/main/scala/org/apache/spark/ml/regression/GeneralizedLinearRegression.scala
@@ -679,7 +679,7 @@ object GeneralizedLinearRegression extends DefaultParamsReadable[GeneralizedLine
}
}
- private[regression] object Tweedie{
+ private[regression] object Tweedie {
/** Constant used in initialization and deviance to avoid numerical issues. */
val delta: Double = 0.1
diff --git a/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala b/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala
index d043c9e58ee..d86410c1ae3 100644
--- a/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala
+++ b/mllib/src/main/scala/org/apache/spark/mllib/pmml/export/KMeansPMMLModelExport.scala
@@ -28,7 +28,7 @@ import org.apache.spark.mllib.clustering.KMeansModel
/**
* PMML Model Export for KMeansModel class
*/
-private[mllib] class KMeansPMMLModelExport(model: KMeansModel) extends PMMLModelExport{
+private[mllib] class KMeansPMMLModelExport(model: KMeansModel) extends PMMLModelExport {
populateKMeansPMML(model)
diff --git a/mllib/src/test/scala/org/apache/spark/ml/feature/InstanceSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/feature/InstanceSuite.scala
index f1e071357ba..53be2444ecb 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/feature/InstanceSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/feature/InstanceSuite.scala
@@ -22,7 +22,7 @@ import org.apache.spark.internal.config.Kryo._
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.serializer.KryoSerializer
-class InstanceSuite extends SparkFunSuite{
+class InstanceSuite extends SparkFunSuite {
test("Kryo class register") {
val conf = new SparkConf(false)
conf.set(KRYO_REGISTRATION_REQUIRED, true)
diff --git a/mllib/src/test/scala/org/apache/spark/ml/regression/RandomForestRegressorSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/regression/RandomForestRegressorSuite.scala
index 4047e6d7199..ff17be1fc53 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/regression/RandomForestRegressorSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/regression/RandomForestRegressorSuite.scala
@@ -33,7 +33,7 @@ import org.apache.spark.sql.{DataFrame, Row}
/**
* Test suite for [[RandomForestRegressor]].
*/
-class RandomForestRegressorSuite extends MLTest with DefaultReadWriteTest{
+class RandomForestRegressorSuite extends MLTest with DefaultReadWriteTest {
import RandomForestRegressorSuite.compareAPIs
import testImplicits._
diff --git a/mllib/src/test/scala/org/apache/spark/ml/tuning/TrainValidationSplitSuite.scala b/mllib/src/test/scala/org/apache/spark/ml/tuning/TrainValidationSplitSuite.scala
index 289db336eca..20ba69a5adb 100644
--- a/mllib/src/test/scala/org/apache/spark/ml/tuning/TrainValidationSplitSuite.scala
+++ b/mllib/src/test/scala/org/apache/spark/ml/tuning/TrainValidationSplitSuite.scala
@@ -321,7 +321,7 @@ class TrainValidationSplitSuite
}
}
-object TrainValidationSplitSuite extends SparkFunSuite{
+object TrainValidationSplitSuite extends SparkFunSuite {
abstract class MyModel extends Model[MyModel]
diff --git a/resource-managers/mesos/src/test/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherSuite.scala b/resource-managers/mesos/src/test/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherSuite.scala
index 7484e3b8367..e6284537402 100644
--- a/resource-managers/mesos/src/test/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherSuite.scala
+++ b/resource-managers/mesos/src/test/scala/org/apache/spark/deploy/mesos/MesosClusterDispatcherSuite.scala
@@ -21,7 +21,7 @@ import org.apache.spark.SparkFunSuite
import org.apache.spark.deploy.TestPrematureExit
class MesosClusterDispatcherSuite extends SparkFunSuite
- with TestPrematureExit{
+ with TestPrematureExit {
test("prints usage on empty input") {
testPrematureExit(Array[String](),
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala
index 974d4b5f868..9b52c7b07e6 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/SortOrder.scala
@@ -44,11 +44,11 @@ case object Descending extends SortDirection {
override def defaultNullOrdering: NullOrdering = NullsLast
}
-case object NullsFirst extends NullOrdering{
+case object NullsFirst extends NullOrdering {
override def sql: String = "NULLS FIRST"
}
-case object NullsLast extends NullOrdering{
+case object NullsLast extends NullOrdering {
override def sql: String = "NULLS LAST"
}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
index e856bb5b9c2..3cc69656bb7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala
@@ -61,7 +61,7 @@ object PartitionSpec {
val emptySpec = PartitionSpec(StructType(Seq.empty[StructField]), Seq.empty[PartitionPath])
}
-object PartitioningUtils extends SQLConfHelper{
+object PartitioningUtils extends SQLConfHelper {
val timestampPartitionPattern = "yyyy-MM-dd HH:mm:ss[.S]"
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetScanBuilder.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetScanBuilder.scala
index 2e3b9b20b5d..39a81e6563b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetScanBuilder.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/parquet/ParquetScanBuilder.scala
@@ -39,7 +39,7 @@ case class ParquetScanBuilder(
dataSchema: StructType,
options: CaseInsensitiveStringMap)
extends FileScanBuilder(sparkSession, fileIndex, dataSchema)
- with SupportsPushDownAggregates{
+ with SupportsPushDownAggregates {
lazy val hadoopConf = {
val caseSensitiveMap = options.asCaseSensitiveMap.asScala.toMap
// Hadoop Configurations are case sensitive.
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index fa2a45be185..1c4d2cf0aec 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -76,7 +76,7 @@ case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
* for the given Catalyst type.
*/
@DeveloperApi
-abstract class JdbcDialect extends Serializable with Logging{
+abstract class JdbcDialect extends Serializable with Logging {
/**
* Check if this dialect instance can handle a certain jdbc url.
* @param url the jdbc url.
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
index c90fa124704..e872b6aaa64 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
@@ -37,7 +37,7 @@ import org.apache.spark.sql.types._
*/
class DataFrameWindowFunctionsSuite extends QueryTest
with SharedSparkSession
- with AdaptiveSparkPlanHelper{
+ with AdaptiveSparkPlanHelper {
import testImplicits._
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 5e5bc27ff43..15a615ce6d4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -2089,7 +2089,7 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
}
test("SPARK-15327: fail to compile generated code with complex data
structure") {
- withTempDir{ dir =>
+ withTempDir { dir =>
val json =
"""
|{"h": {"b": {"c": [{"e": "adfgd"}], "a": [{"e": "testing", "count":
3}],
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala
index 2a67864de8d..343b59a311e 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileFormatWriterSuite.scala
@@ -24,7 +24,7 @@ import org.apache.spark.sql.test.SharedSparkSession
class FileFormatWriterSuite
extends QueryTest
with SharedSparkSession
- with CodegenInterpretedPlanTest{
+ with CodegenInterpretedPlanTest {
import testImplicits._
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala
index dfc64a41d9f..1ef07bf9ebc 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SparkPlanInfoSuite.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.execution.ui
import org.apache.spark.sql.execution.SparkPlanInfo
import org.apache.spark.sql.test.SharedSparkSession
-class SparkPlanInfoSuite extends SharedSparkSession{
+class SparkPlanInfoSuite extends SharedSparkSession {
import testImplicits._
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingListener.scala b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingListener.scala
index ce1afad7a91..733ab03f0fe 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingListener.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaStreamingListener.scala
@@ -19,7 +19,7 @@ package org.apache.spark.streaming.api.java
import org.apache.spark.streaming.Time
-private[streaming] trait PythonStreamingListener{
+private[streaming] trait PythonStreamingListener {
/** Called when the streaming has been started */
def onStreamingStarted(streamingStarted: JavaStreamingListenerStreamingStarted): Unit = { }