Github user maropu commented on a diff in the pull request:
https://github.com/apache/spark/pull/21266#discussion_r190129285
--- Diff: sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala ---
@@ -0,0 +1,827 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.execution.benchmark
+
+import java.io.File
+
+import scala.collection.JavaConverters._
+import scala.util.{Random, Try}
+
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.{DataFrame, DataFrameWriter, Row, SparkSession}
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.execution.datasources.parquet.{SpecificParquetRecordReaderBase,
+  VectorizedParquetRecordReader}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types._
+import org.apache.spark.sql.vectorized.ColumnVector
+import org.apache.spark.util.{Benchmark, Utils}
+
+
+/**
+ * Benchmark to measure data source read performance.
+ * To run this:
+ * spark-submit --class <this class> <spark sql test jar>
+ */
+object DataSourceReadBenchmark {
+ val conf = new SparkConf()
+ .setAppName("DataSourceReadBenchmark")
+ .setIfMissing("spark.master", "local[1]")
+ .setIfMissing("spark.driver.memory", "3g")
+ .setIfMissing("spark.executor.memory", "3g")
+
+ val spark = SparkSession.builder.config(conf).getOrCreate()
+
+ // Set default configs. Individual cases will change them if necessary.
+ spark.conf.set(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key, "true")
+ spark.conf.set(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key, "true")
+ spark.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
+
+ def withTempPath(f: File => Unit): Unit = {
+ val path = Utils.createTempDir()
+ path.delete()
+ try f(path) finally Utils.deleteRecursively(path)
+ }
+
+ def withTempTable(tableNames: String*)(f: => Unit): Unit = {
+ try f finally tableNames.foreach(spark.catalog.dropTempView)
+ }
+
+ def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
+ val (keys, values) = pairs.unzip
+ val currentValues = keys.map(key => Try(spark.conf.get(key)).toOption)
+ (keys, values).zipped.foreach(spark.conf.set)
+ try f finally {
+ keys.zip(currentValues).foreach {
+ case (key, Some(value)) => spark.conf.set(key, value)
+ case (key, None) => spark.conf.unset(key)
+ }
+ }
+ }
+
+  private def prepareTable(dir: File, df: DataFrame, partition: Option[String] = None): Unit = {
+ val testDf = if (partition.isDefined) {
+ df.write.partitionBy(partition.get)
+ } else {
+ df.write
+ }
+
+ saveAsCsvTable(testDf, dir.getCanonicalPath + "/csv")
+ saveAsJsonTable(testDf, dir.getCanonicalPath + "/json")
+ saveAsParquetTable(testDf, dir.getCanonicalPath + "/parquet")
+ saveAsOrcTable(testDf, dir.getCanonicalPath + "/orc")
+ }
+
+  private def saveAsCsvTable(df: DataFrameWriter[Row], dir: String): Unit = {
+    df.mode("overwrite").option("compression", "gzip").option("header", true).csv(dir)
+    spark.read.option("header", true).csv(dir).createOrReplaceTempView("csvTable")
+ }
+
+  private def saveAsJsonTable(df: DataFrameWriter[Row], dir: String): Unit = {
+ df.mode("overwrite").option("compression", "gzip").json(dir)
+ spark.read.json(dir).createOrReplaceTempView("jsonTable")
+ }
+
+  private def saveAsParquetTable(df: DataFrameWriter[Row], dir: String): Unit = {
+ df.mode("overwrite").option("compression", "snappy").parquet(dir)
+ spark.read.parquet(dir).createOrReplaceTempView("parquetTable")
+ }
+
+  private def saveAsOrcTable(df: DataFrameWriter[Row], dir: String): Unit = {
+ df.mode("overwrite").option("compression", "snappy").orc(dir)
+ spark.read.orc(dir).createOrReplaceTempView("orcTable")
+ }
+
+ def numericScanBenchmark(values: Int, dataType: DataType): Unit = {
+ // Benchmarks running through spark sql.
+    val sqlBenchmark = new Benchmark(s"SQL Single ${dataType.sql} Column Scan", values)
+
+ // Benchmarks driving reader component directly.
+ val parquetReaderBenchmark = new Benchmark(
+ s"Parquet Reader Single ${dataType.sql} Column Scan", values)
+
+ withTempPath { dir =>
+      withTempTable("t1", "csvTable", "jsonTable", "parquetTable", "orcTable") {
+ import spark.implicits._
+        spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
+
+        prepareTable(dir, spark.sql(s"SELECT CAST(value as ${dataType.sql}) id FROM t1"))
+
+ sqlBenchmark.addCase("SQL CSV") { _ =>
+ spark.sql("select sum(id) from csvTable").collect()
+ }
+
+ sqlBenchmark.addCase("SQL Json") { _ =>
+ spark.sql("select sum(id) from jsonTable").collect()
+ }
+
+ sqlBenchmark.addCase("SQL Parquet Vectorized") { _ =>
+ spark.sql("select sum(id) from parquetTable").collect()
+ }
+
+ sqlBenchmark.addCase("SQL Parquet MR") { _ =>
+          withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
+ spark.sql("select sum(id) from parquetTable").collect()
+ }
+ }
+
+ sqlBenchmark.addCase("SQL ORC Vectorized") { _ =>
+ spark.sql("SELECT sum(id) FROM orcTable").collect()
--- End diff --
I checked that `ORC_COPY_BATCH_TO_SPARK` is `false` in the other tests (I didn't find any performance difference after explicitly setting it to `false` on line 50: https://github.com/apache/spark/pull/21266/files#diff-ae11b49db05c9e6829cad071b112a742R50).
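Just for illustration (not part of this diff): a minimal sketch of how the flag could be pinned or varied explicitly in this benchmark, assuming the config is still exposed as `SQLConf.ORC_COPY_BATCH_TO_SPARK` (`spark.sql.orc.copyBatchToSpark`) and that its default is already `false`:

```scala
import org.apache.spark.sql.internal.SQLConf

// Sketch only: pin the ORC batch-copy behaviour explicitly, next to the other
// defaults this benchmark sets (assumes the key already defaults to "false",
// so this documents intent rather than changing the measured path).
spark.conf.set(SQLConf.ORC_COPY_BATCH_TO_SPARK.key, "false")

// Or scope it to a single case via the withSQLConf helper defined in this file,
// so the copy-batch-to-Spark-rows path could be measured as its own case:
sqlBenchmark.addCase("SQL ORC Vectorized (copy batch to Spark rows)") { _ =>
  withSQLConf(SQLConf.ORC_COPY_BATCH_TO_SPARK.key -> "true") {
    spark.sql("SELECT sum(id) FROM orcTable").collect()
  }
}
```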