This is an automated email from the ASF dual-hosted git repository.

sunchao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion-comet.git


The following commit(s) were added to refs/heads/main by this push:
     new d069713  build: Enforce scalafix in CI (#203)
d069713 is described below

commit d0697132e83004e8960c11d5a7603b4c21fbb956
Author: advancedxy <xian...@apache.org>
AuthorDate: Wed Mar 13 15:03:49 2024 +0800

    build: Enforce scalafix in CI (#203)
---
 .github/actions/java-test/action.yaml                                 | 2 +-
 DEVELOPMENT.md                                                        | 4 ++++
 Makefile                                                              | 1 +
 .../src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala | 1 -
 spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala      | 2 +-
 spark/src/test/scala/org/apache/comet/CometCastSuite.scala            | 4 ++--
 spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala      | 2 +-
 7 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/.github/actions/java-test/action.yaml b/.github/actions/java-test/action.yaml
index f82f05b..e1efd9f 100644
--- a/.github/actions/java-test/action.yaml
+++ b/.github/actions/java-test/action.yaml
@@ -49,7 +49,7 @@ runs:
     - name: Run Maven compile
       shell: bash
       run: |
-        ./mvnw -B compile test-compile scalafix:scalafix -Psemanticdb ${{ inputs.maven_opts }}
+        ./mvnw -B compile test-compile scalafix:scalafix -Dscalafix.mode=CHECK -Psemanticdb ${{ inputs.maven_opts }}
 
     - name: Run tests
       shell: bash
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 25a6599..6dc0f1f 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -84,3 +84,7 @@ in the respective source code, e.g., `CometTPCHQueryBenchmark`.
 ## Debugging
 Comet is a multi-language project with native code written in Rust and JVM code written in Java and Scala.
 It is possible to debug both native and JVM code concurrently as described in the [DEBUGGING guide](DEBUGGING.md)
+
+## Submitting a Pull Request
+Comet uses `cargo fmt`, [Scalafix](https://github.com/scalacenter/scalafix) and [Spotless](https://github.com/diffplug/spotless/tree/main/plugin-maven) to
+automatically format the code. Before submitting a pull request, you can simply run `make format` to format the code.
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 6f599a0..ca5d756 100644
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,7 @@ clean:
 bench:
        cd core && RUSTFLAGS="-Ctarget-cpu=native" cargo bench $(filter-out $@,$(MAKECMDGOALS))
 format:
+       cd core && cargo fmt
        ./mvnw compile test-compile scalafix:scalafix -Psemanticdb $(PROFILES)
        ./mvnw spotless:apply $(PROFILES)
 
diff --git a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
index 87c2265..5720b69 100644
--- a/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
+++ b/spark/src/main/scala/org/apache/comet/CometSparkSessionExtensions.scala
@@ -26,7 +26,6 @@ import org.apache.spark.internal.Logging
 import org.apache.spark.network.util.ByteUnit
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.SparkSessionExtensions
-import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.comet._
 import org.apache.spark.sql.comet.execution.shuffle.{CometColumnarShuffle, CometNativeShuffle}
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 08a499b..902f703 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.expressions.objects.StaticInvoke
 import org.apache.spark.sql.catalyst.optimizer.NormalizeNaNAndZero
 import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, SinglePartition}
 import org.apache.spark.sql.catalyst.util.CharVarcharCodegenUtils
-import org.apache.spark.sql.comet.{CometHashAggregateExec, CometPlan, CometSinkPlaceHolder, DecimalPrecision}
+import org.apache.spark.sql.comet.{CometSinkPlaceHolder, DecimalPrecision}
 import org.apache.spark.sql.execution
 import org.apache.spark.sql.execution._
 import org.apache.spark.sql.execution.aggregate.HashAggregateExec
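
[Editor's note, not part of the patch: the import trims above and in CometSparkSessionExtensions.scala are the kind of change Scalafix's RemoveUnused rule produces; the rule name is an assumption, since the repo's Scalafix configuration is not shown in this diff. The rule relies on SemanticDB compiler output, which is why both CI and `make format` compile with the -Psemanticdb profile. A minimal sketch with hypothetical names:

    // RemoveUnusedSketch.scala -- illustrative only, not repo code.
    import scala.collection.immutable.ListMap // referenced below, so kept
    // import scala.collection.mutable.ListBuffer // never referenced:
    //                                            // Scalafix would delete it

    object RemoveUnusedSketch {
      val counts: ListMap[String, Int] = ListMap("a" -> 1)
    }
]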
diff --git a/spark/src/test/scala/org/apache/comet/CometCastSuite.scala b/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
index 565d226..317371f 100644
--- a/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
+++ b/spark/src/test/scala/org/apache/comet/CometCastSuite.scala
@@ -90,13 +90,13 @@ class CometCastSuite extends CometTestBase with AdaptiveSparkPlanHelper {
     Range(0, len).map(_ => chars.charAt(r.nextInt(chars.length))).mkString
   }
 
-  private def fuzzCastFromString(chars: String, maxLen: Int, toType: DataType) {
+  private def fuzzCastFromString(chars: String, maxLen: Int, toType: DataType): Unit = {
     val r = new Random(0)
     val inputs = Range(0, 10000).map(_ => genString(r, chars, maxLen))
     castTest(inputs.toDF("a"), toType)
   }
 
-  private def castTest(input: DataFrame, toType: DataType) {
+  private def castTest(input: DataFrame, toType: DataType): Unit = {
     withTempPath { dir =>
       val df = roundtripParquet(input, dir)
         .withColumn("converted", col("a").cast(toType))
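
[Editor's note, not part of the patch: the two signature changes above replace Scala's deprecated procedure syntax with an explicit result type; this matches Scalafix's ProcedureSyntax rule (an assumption, as the rule configuration is not part of this diff). A minimal sketch with a hypothetical method:

    // ProcedureSyntaxSketch.scala -- illustrative only, not repo code.
    object ProcedureSyntaxSketch {
      // Deprecated procedure syntax, which the rule rewrites:
      //   def logResult(msg: String) { println(msg) }
      // Rewritten form with an explicit `: Unit =`, as in CometCastSuite above:
      def logResult(msg: String): Unit = {
        println(msg)
      }
    }
]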
diff --git a/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala b/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
index 7424f1b..803f30b 100644
--- a/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
+++ b/spark/src/test/scala/org/apache/comet/CometExpressionSuite.scala
@@ -24,7 +24,7 @@ import java.util
 import org.apache.hadoop.fs.Path
 import org.apache.spark.sql.{CometTestBase, DataFrame, Row}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
-import org.apache.spark.sql.functions.{expr, lit}
+import org.apache.spark.sql.functions.expr
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.internal.SQLConf.SESSION_LOCAL_TIMEZONE
 import org.apache.spark.sql.types.{Decimal, DecimalType, StructType}
