This is an automated email from the ASF dual-hosted git repository.
lzljs3620320 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/paimon.git
The following commit(s) were added to refs/heads/master by this push:
new 8efbc4b0d1 [spark] Update scalafmt version to 3.10.2 (#6709)
8efbc4b0d1 is described below
commit 8efbc4b0d147c58127ac77702ab598b70a85e77b
Author: Zouxxyy <[email protected]>
AuthorDate: Tue Dec 2 11:51:47 2025 +0800
[spark] Update scalafmt version to 3.10.2 (#6709)
---
.scalafmt.conf | 2 +-
.../spark/sql/execution/datasources/v2/MergeRowsExec.scala | 3 ++-
.../src/main/scala/org/apache/paimon/spark/PaimonScan.scala | 4 +---
.../spark/sql/execution/datasources/v2/MergeRowsExec.scala | 3 ++-
.../spark/sql/execution/datasources/v2/MergeRowsExec.scala | 3 ++-
.../org/apache/paimon/spark/DataEvolutionSparkTableWrite.scala | 3 ++-
.../org/apache/paimon/spark/PaimonPartitionManagement.scala | 3 ++-
.../src/main/scala/org/apache/paimon/spark/PaimonScan.scala | 4 +---
.../src/main/scala/org/apache/paimon/spark/SparkTableWrite.scala | 3 ++-
.../paimon/spark/catalyst/analysis/PaimonMergeIntoResolver.scala | 3 ++-
.../org/apache/paimon/spark/commands/PaimonSparkWriter.scala | 3 ++-
.../org/apache/paimon/spark/commands/SparkDataFileMeta.scala | 5 ++---
.../scala/org/apache/paimon/spark/execution/PaimonStrategy.scala | 3 ++-
.../paimon/spark/procedure/SparkRemoveUnexistingFiles.scala | 3 ++-
.../scala/org/apache/paimon/spark/write/BaseWriteBuilder.scala | 9 ++++-----
.../scala/org/apache/paimon/spark/write/DataWriteHelper.scala | 4 +---
.../extensions/AbstractPaimonSparkSqlExtensionsParser.scala | 3 ++-
.../org/apache/paimon/spark/procedure/BranchProcedureTest.scala | 3 ++-
.../paimon/spark/procedure/ExpireSnapshotsProcedureTest.scala | 3 ++-
.../paimon/spark/procedure/MigrateDatabaseProcedureTest.scala | 3 +--
.../org/apache/paimon/spark/sql/BucketedTableQueryTest.scala | 4 +---
.../src/test/scala/org/apache/paimon/spark/sql/DDLTestBase.scala | 3 +--
.../scala/org/apache/paimon/spark/sql/DeletionVectorTest.scala | 6 ++----
.../org/apache/paimon/spark/sql/TableValuedFunctionsTest.scala | 3 ++-
.../src/test/scala/org/apache/spark/sql/paimon/Utils.scala | 4 +---
pom.xml | 2 +-
26 files changed, 45 insertions(+), 47 deletions(-)
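For readers skimming the hunks below: the visible effect of moving to scalafmt 3.10.2 in this diff is that a multi-line argument list now gets its closing parenthesis on its own line, and a scaladoc comment that fits within the line limit is collapsed onto a single line. A minimal illustrative sketch of the paren change (hypothetical names, not code from this commit):

    // formatted by scalafmt 3.4.3: closing paren stays on the last argument line
    buildCommitMessage(
      partition,
      bucket,
      newFilesIncrement,
      compactIncrement)

    // formatted by scalafmt 3.10.2: closing paren moves to its own line
    buildCommitMessage(
      partition,
      bucket,
      newFilesIncrement,
      compactIncrement
    )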
diff --git a/.scalafmt.conf b/.scalafmt.conf
index 6335c0e206..38242904b7 100644
--- a/.scalafmt.conf
+++ b/.scalafmt.conf
@@ -1,7 +1,7 @@
runner.dialect = scala212
# Version is required to make sure IntelliJ picks the right version
-version = 3.4.3
+version = 3.10.2
preset = default
# Max column
diff --git a/paimon-spark/paimon-spark-3.2/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala b/paimon-spark/paimon-spark-3.2/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
index b830b6b6fe..9e66661b1c 100644
--- a/paimon-spark/paimon-spark-3.2/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
+++ b/paimon-spark/paimon-spark-3.2/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
@@ -89,7 +89,8 @@ case class MergeRowsExec(
isSourceRowPresentPred,
matchedInstructionExecs,
notMatchedInstructionExecs,
- notMatchedBySourceInstructionExecs)
+ notMatchedBySourceInstructionExecs
+ )
// null indicates a record must be discarded
mergeIterator.filter(_ != null)
diff --git a/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/paimon/spark/PaimonScan.scala b/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
index a451df1e19..0fb515ac2c 100644
--- a/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
+++ b/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
@@ -78,9 +78,7 @@ case class PaimonScan(
}
}
- /**
- * Extract the bucket number from the splits only if all splits have the same totalBuckets number.
- */
+ /** Extract the bucket number from the splits only if all splits have the same totalBuckets number. */
private def extractBucketNumber(): Option[Int] = {
val splits = getOriginSplits
if (splits.exists(!_.isInstanceOf[DataSplit])) {
diff --git a/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala b/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
index b830b6b6fe..9e66661b1c 100644
--- a/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
+++ b/paimon-spark/paimon-spark-3.3/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
@@ -89,7 +89,8 @@ case class MergeRowsExec(
isSourceRowPresentPred,
matchedInstructionExecs,
notMatchedInstructionExecs,
- notMatchedBySourceInstructionExecs)
+ notMatchedBySourceInstructionExecs
+ )
// null indicates a record must be discarded
mergeIterator.filter(_ != null)
diff --git a/paimon-spark/paimon-spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala b/paimon-spark/paimon-spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
index b830b6b6fe..9e66661b1c 100644
--- a/paimon-spark/paimon-spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
+++ b/paimon-spark/paimon-spark-3.4/src/main/scala/org/apache/spark/sql/execution/datasources/v2/MergeRowsExec.scala
@@ -89,7 +89,8 @@ case class MergeRowsExec(
isSourceRowPresentPred,
matchedInstructionExecs,
notMatchedInstructionExecs,
- notMatchedBySourceInstructionExecs)
+ notMatchedBySourceInstructionExecs
+ )
// null indicates a record must be discarded
mergeIterator.filter(_ != null)
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/DataEvolutionSparkTableWrite.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/DataEvolutionSparkTableWrite.scala
index 0ca68e4933..4b86eb1fe1 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/DataEvolutionSparkTableWrite.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/DataEvolutionSparkTableWrite.scala
@@ -155,7 +155,8 @@ case class DataEvolutionSparkTableWrite(
java.util.Arrays.asList(dataFileMeta),
Collections.emptyList(),
Collections.emptyList()),
- CompactIncrement.emptyIncrement())
+ CompactIncrement.emptyIncrement()
+ )
} finally {
recordWriter.close()
}
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
index 193ec55d3b..36ab850d29 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonPartitionManagement.scala
@@ -50,7 +50,8 @@ trait PaimonPartitionManagement extends SupportsAtomicPartitionManagement {
fileStoreTable.coreOptions().partitionDefaultName(),
partitionRowType,
table.partitionKeys().asScala.toArray,
- CoreOptions.fromMap(table.options()).legacyPartitionName)
+ CoreOptions.fromMap(table.options()).legacyPartitionName
+ )
rows.map {
r =>
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonScan.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
index fe3efcea11..08f0029989 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/PaimonScan.scala
@@ -131,9 +131,7 @@ abstract class PaimonScanCommon(
}
}
- /**
- * Extract the bucket number from the splits only if all splits have the same totalBuckets number.
- */
+ /** Extract the bucket number from the splits only if all splits have the same totalBuckets number. */
private def extractBucketNumber(): Option[Int] = {
val splits = getOriginSplits
if (splits.exists(!_.isInstanceOf[DataSplit])) {
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/SparkTableWrite.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/SparkTableWrite.scala
index 4ce281b836..6d92f5a49e 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/SparkTableWrite.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/SparkTableWrite.scala
@@ -89,7 +89,8 @@ case class SparkTableWrite(
message.bucket(),
postponePartitionBucketComputer.get.apply(message.partition()),
message.newFilesIncrement(),
- message.compactIncrement())
+ message.compactIncrement()
+ )
} else {
message
}
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonMergeIntoResolver.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonMergeIntoResolver.scala
index 4525393bd2..78ee8ec217 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonMergeIntoResolver.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/catalyst/analysis/PaimonMergeIntoResolver.scala
@@ -53,7 +53,8 @@ object PaimonMergeIntoResolver extends PaimonMergeIntoResolverBase {
mergeCondition = resolvedCond,
matchedActions = resolvedMatched,
notMatchedActions = resolvedNotMatched,
- notMatchedBySourceActions = resolvedNotMatchedBySource)
+ notMatchedBySourceActions = resolvedNotMatchedBySource
+ )
}
}
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/PaimonSparkWriter.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/PaimonSparkWriter.scala
index 515464ef14..23690b46eb 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/PaimonSparkWriter.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/PaimonSparkWriter.scala
@@ -393,7 +393,8 @@ case class PaimonSparkWriter(
java.util.Collections.emptyList(),
java.util.Collections.emptyList(),
added.map(_.indexFile).asJava,
- deleted.map(_.indexFile).asJava),
+ deleted.map(_.indexFile).asJava
+ ),
CompactIncrement.emptyIncrement()
)
val serializer = new CommitMessageSerializer
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/SparkDataFileMeta.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/SparkDataFileMeta.scala
index 921d2e4735..5a9f68328c 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/SparkDataFileMeta.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/commands/SparkDataFileMeta.scala
@@ -71,9 +71,8 @@ object SparkDataFileMeta {
.groupBy(file => (file.partition, file.bucket))
.map {
case ((partition, bucket), files) =>
- val (dataFiles, deletionFiles) = files.map {
- file => (file.dataFileMeta, file.deletionFile.orNull)
- }.unzip
+ val (dataFiles, deletionFiles) =
+ files.map(file => (file.dataFileMeta, file.deletionFile.orNull)).unzip
new DataSplit.Builder()
.withPartition(partition)
.withBucket(bucket)
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/execution/PaimonStrategy.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/execution/PaimonStrategy.scala
index 274909bf5e..6f21aa23d8 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/execution/PaimonStrategy.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/execution/PaimonStrategy.scala
@@ -78,7 +78,8 @@ case class PaimonStrategy(spark: SparkSession)
comment,
properties,
allowExisting,
- replace) =>
+ replace
+ ) =>
CreatePaimonViewExec(
viewCatalog,
ident,
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/procedure/SparkRemoveUnexistingFiles.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/procedure/SparkRemoveUnexistingFiles.scala
index 5361d4eafa..1c27fc1d2a 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/procedure/SparkRemoveUnexistingFiles.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/procedure/SparkRemoveUnexistingFiles.scala
@@ -71,7 +71,8 @@ case class SparkRemoveUnexistingFiles(
Collections.emptyList(),
new util.ArrayList[DataFileMeta](metaMap.values()),
Collections.emptyList()),
- CompactIncrement.emptyIncrement())
+ CompactIncrement.emptyIncrement()
+ )
(metaMap.keySet().asScala.toSeq, serializer.serialize(message))
}
})
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/BaseWriteBuilder.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/BaseWriteBuilder.scala
index f76cc969c2..3456f078a4 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/BaseWriteBuilder.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/BaseWriteBuilder.scala
@@ -64,13 +64,12 @@ abstract class BaseWriteBuilder(table: Table)
// `dataframe.writeTo(T).overwrite(...)`
val partitionNames = partitionRowType.getFieldNames.asScala
val allReferences = filters.flatMap(_.references)
- val containsDataColumn = allReferences.exists {
- reference => !partitionNames.exists(conf.resolver.apply(reference, _))
- }
+ val containsDataColumn =
+ allReferences.exists(reference => !partitionNames.exists(conf.resolver.apply(reference, _)))
if (containsDataColumn) {
throw new RuntimeException(
- s"Only support Overwrite filters on partition column
${partitionNames.mkString(
- ", ")}, but got ${filters.mkString(", ")}.")
+ s"Only support Overwrite filters on partition column
${partitionNames.mkString(", ")}, " +
+ s"but got ${filters.mkString(", ")}.")
}
if (allReferences.distinct.length < allReferences.length) {
// fail with `part = 1 and part = 2`
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/DataWriteHelper.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/DataWriteHelper.scala
index 6334eebe69..287d299d64 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/DataWriteHelper.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/paimon/spark/write/DataWriteHelper.scala
@@ -31,9 +31,7 @@ trait DataWriteHelper extends Logging {
val fullCompactionDeltaCommits: Option[Int]
- /**
- * For batch write, batchId is -1, for streaming write, batchId is the current batch id (>= 0).
- */
+ /** For batch write, batchId is -1, for streaming write, batchId is the current batch id (>= 0). */
val batchId: Long
private val needFullCompaction: Boolean = {
diff --git a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/spark/sql/catalyst/parser/extensions/AbstractPaimonSparkSqlExtensionsParser.scala b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/spark/sql/catalyst/parser/extensions/AbstractPaimonSparkSqlExtensionsParser.scala
index 2e84df8027..f5b3d89a67 100644
--- a/paimon-spark/paimon-spark-common/src/main/scala/org/apache/spark/sql/catalyst/parser/extensions/AbstractPaimonSparkSqlExtensionsParser.scala
+++ b/paimon-spark/paimon-spark-common/src/main/scala/org/apache/spark/sql/catalyst/parser/extensions/AbstractPaimonSparkSqlExtensionsParser.scala
@@ -225,7 +225,8 @@ case object PaimonSqlExtensionsPostProcessor extends PaimonSqlExtensionsBaseList
PaimonSqlExtensionsParser.IDENTIFIER,
token.getChannel,
token.getStartIndex + stripMargins,
- token.getStopIndex - stripMargins)
+ token.getStopIndex - stripMargins
+ )
parent.addChild(new TerminalNodeImpl(f(newToken)))
}
}
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/BranchProcedureTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/BranchProcedureTest.scala
index 735806b5a6..67786a47fe 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/BranchProcedureTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/BranchProcedureTest.scala
@@ -172,7 +172,8 @@ class BranchProcedureTest extends PaimonSparkTestBase with StreamTest {
Row("20240726", "cherry", 3),
Row("20240725", "apple", 5),
Row("20240726", "pear", 6),
- Row("20240725", "banana", 7)))
+ Row("20240725", "banana", 7))
+ )
sql("ALTER TABLE T UNSET TBLPROPERTIES ('scan.fallback-branch')")
checkAnswer(
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/ExpireSnapshotsProcedureTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/ExpireSnapshotsProcedureTest.scala
index b39aa5d058..aa65d8b9c3 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/ExpireSnapshotsProcedureTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/ExpireSnapshotsProcedureTest.scala
@@ -263,7 +263,8 @@ class ExpireSnapshotsProcedureTest extends PaimonSparkTestBase with StreamTest {
checkAnswer(
spark.sql(
"CALL paimon.sys.expire_snapshots(table => 'test.T', options
=> 'snapshot.num-retained.max=2, snapshot.num-retained.min=1')"),
- Row(1L) :: Nil)
+ Row(1L) :: Nil
+ )
checkAnswer(
spark.sql("SELECT snapshot_id FROM paimon.test.`T$snapshots`"),
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateDatabaseProcedureTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateDatabaseProcedureTest.scala
index a5a164d1bc..b583c59136 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateDatabaseProcedureTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/procedure/MigrateDatabaseProcedureTest.scala
@@ -72,8 +72,7 @@ class MigrateDatabaseProcedureTest extends PaimonHiveTestBase {
Seq("parquet", "orc", "avro").foreach(
format => {
- test(
- s"Paimon migrate database procedure: migrate $format database with
setting parallelism") {
+ test(s"Paimon migrate database procedure: migrate $format database with
setting parallelism") {
withTable(s"hive_tbl_01$random", s"hive_tbl_02$random") {
// create hive table
spark.sql(s"""
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/BucketedTableQueryTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/BucketedTableQueryTest.scala
index 3f87f8ec6f..16b932ed7a 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/BucketedTableQueryTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/BucketedTableQueryTest.scala
@@ -228,9 +228,7 @@ class BucketedTableQueryTest extends PaimonSparkTestBase with AdaptiveSparkPlanH
checkAnswerAndShuffleSorts("SELECT id, max(c) FROM t GROUP BY id", 0, 0)
// generate some files
- (1.to(20)).foreach {
- i => spark.sql(s"INSERT INTO t VALUES ($i, 'x1'), ($i, 'x3'), ($i, 'x3')")
- }
+ (1.to(20)).foreach(i => spark.sql(s"INSERT INTO t VALUES ($i, 'x1'), ($i, 'x3'), ($i, 'x3')"))
checkAnswerAndShuffleSorts("SELECT id, max(c) FROM t GROUP BY id", 0, 1)
}
}
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLTestBase.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLTestBase.scala
index 2bf9a70810..40a992d1a3 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLTestBase.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DDLTestBase.scala
@@ -398,8 +398,7 @@ abstract class DDLTestBase extends PaimonSparkTestBase {
test("Paimon DDL: select table with timestamp and timestamp_ntz with
filter") {
Seq(true, false).foreach {
datetimeJava8APIEnabled =>
- withSparkSQLConf(
- "spark.sql.datetime.java8API.enabled" ->
datetimeJava8APIEnabled.toString) {
+ withSparkSQLConf("spark.sql.datetime.java8API.enabled" ->
datetimeJava8APIEnabled.toString) {
withTable("paimon_tbl") {
// Spark support create table with timestamp_ntz since 3.4
if (gteqSpark3_4) {
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DeletionVectorTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DeletionVectorTest.scala
index e3a5896ab1..2eb9aa253e 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DeletionVectorTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/DeletionVectorTest.scala
@@ -149,8 +149,7 @@ class DeletionVectorTest extends PaimonSparkTestBase with AdaptiveSparkPlanHelpe
bucketModes.foreach {
bucket =>
- test(
- s"Paimon DeletionVector: update for append non-partitioned table with
bucket = $bucket") {
+ test(s"Paimon DeletionVector: update for append non-partitioned table
with bucket = $bucket") {
withTable("T") {
val bucketKey = if (bucket > 1) {
", 'bucket-key' = 'id'"
@@ -309,8 +308,7 @@ class DeletionVectorTest extends PaimonSparkTestBase with AdaptiveSparkPlanHelpe
bucketModes.foreach {
bucket =>
- test(
- s"Paimon DeletionVector: delete for append non-partitioned table with
bucket = $bucket") {
+ test(s"Paimon DeletionVector: delete for append non-partitioned table
with bucket = $bucket") {
withTable("T") {
val bucketKey = if (bucket > 1) {
", 'bucket-key' = 'id'"
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/TableValuedFunctionsTest.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/TableValuedFunctionsTest.scala
index b11f5877c5..ecddb8ac63 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/TableValuedFunctionsTest.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/paimon/spark/sql/TableValuedFunctionsTest.scala
@@ -137,7 +137,8 @@ class TableValuedFunctionsTest extends PaimonHiveTestBase {
checkAnswer(
sql(
s"SELECT * FROM
paimon_incremental_between_timestamp('$catalogName.$dbName.t', '$t1String',
'$t3String') ORDER BY id"),
- Seq(Row(2), Row(3), Row(4)))
+ Seq(Row(2), Row(3), Row(4))
+ )
}
}
}
diff --git a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/spark/sql/paimon/Utils.scala b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/spark/sql/paimon/Utils.scala
index b5700ea8ef..0a0cc01e72 100644
--- a/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/spark/sql/paimon/Utils.scala
+++ b/paimon-spark/paimon-spark-ut/src/test/scala/org/apache/spark/sql/paimon/Utils.scala
@@ -23,9 +23,7 @@ import org.apache.spark.util.{Utils => SparkUtils}
import java.io.File
-/**
- * A wrapper that some Objects or Classes is limited to access beyond [[org.apache.spark]] package.
- */
+/** A wrapper that some Objects or Classes is limited to access beyond [[org.apache.spark]] package. */
object Utils {
def createTempDir: File =
SparkUtils.createTempDir(System.getProperty("java.io.tmpdir"), "spark")
diff --git a/pom.xml b/pom.xml
index 2ef51944f9..6abdb16219 100644
--- a/pom.xml
+++ b/pom.xml
@@ -997,7 +997,7 @@ under the License.
<toggleOffOn />
<scalafmt>
- <version>3.4.3</version>
+ <version>3.10.2</version>
<file>${maven.multiModuleProjectDirectory}/.scalafmt.conf</file>
</scalafmt>