This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 51ed6babc4b [SPARK-41962][MINOR][SQL] Update the order of imports in class SpecificParquetRecordReaderBase
51ed6babc4b is described below

commit 51ed6babc4bdefe8d2f4c6297adb3aa1dfb33ccd
Author: wayneguow <guo...@gmail.com>
AuthorDate: Tue Feb 7 16:11:09 2023 +0900

    [SPARK-41962][MINOR][SQL] Update the order of imports in class SpecificParquetRecordReaderBase

    ### What changes were proposed in this pull request?

    Update the order of imports in class SpecificParquetRecordReaderBase.

    ### Why are the changes needed?

    Follow the code style.

    ### Does this PR introduce _any_ user-facing change?

    No

    ### How was this patch tested?

    Passed GA.

    Closes #39906 from wayneguow/import.

    Authored-by: wayneguow <guo...@gmail.com>
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
    (cherry picked from commit d6134f78d3d448a990af53beb8850ff91b71aef6)
    Signed-off-by: Hyukjin Kwon <gurwls...@apache.org>
---
 .../datasources/parquet/SpecificParquetRecordReaderBase.java | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
index 292a0f98af1..48016c3fdc0 100644
--- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
+++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java
@@ -28,12 +28,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.parquet.VersionParser;
-import org.apache.parquet.VersionParser.ParsedVersion;
-import org.apache.parquet.column.page.PageReadStore;
 import scala.Option;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.FileSplit;
@@ -42,6 +39,9 @@ import org.apache.hadoop.mapreduce.RecordReader;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.parquet.HadoopReadOptions;
 import org.apache.parquet.ParquetReadOptions;
+import org.apache.parquet.VersionParser;
+import org.apache.parquet.VersionParser.ParsedVersion;
+import org.apache.parquet.column.page.PageReadStore;
 import org.apache.parquet.hadoop.BadConfigurationException;
 import org.apache.parquet.hadoop.ParquetFileReader;
 import org.apache.parquet.hadoop.ParquetInputFormat;
@@ -51,6 +51,7 @@ import org.apache.parquet.hadoop.util.ConfigurationUtil;
 import org.apache.parquet.hadoop.util.HadoopInputFile;
 import org.apache.parquet.schema.MessageType;
 import org.apache.parquet.schema.Types;
+
 import org.apache.spark.TaskContext;
 import org.apache.spark.TaskContext$;
 import org.apache.spark.sql.internal.SQLConf;

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
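[Editorial note, not part of the original email] As a quick illustration of the ordering this diff converges on, below is a condensed sketch of the resulting import header of SpecificParquetRecordReaderBase.java: java.* first, then scala.*, then other third-party packages (Guava, Hadoop, Parquet), then org.apache.spark.*, with blank lines between groups. The grouping is inferred directly from the hunks above; the exact wording of Spark's style rule is not quoted here, and the sketch lists only a subset of the file's imports.

    // Sketch of the post-change import grouping (subset of the real file):
    import java.util.List;
    import java.util.Map;

    import scala.Option;

    import com.google.common.annotations.VisibleForTesting;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.parquet.column.page.PageReadStore;

    import org.apache.spark.TaskContext;
    import org.apache.spark.sql.internal.SQLConf;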