Github user dongjoon-hyun commented on a diff in the pull request:
https://github.com/apache/spark/pull/19943#discussion_r158895355
--- Diff: sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcReadBenchmark.scala ---
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.hive.orc
+
+import java.io.File
+
+import scala.util.Random
+
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.{DataFrame, SparkSession}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types._
+import org.apache.spark.util.{Benchmark, Utils}
+
+
+/**
+ * Benchmark to measure ORC read performance.
+ *
+ * This is in the `sql/hive` module in order to compare the `sql/core` and `sql/hive` ORC data sources.
+ */
+// scalastyle:off line.size.limit
+object OrcReadBenchmark {
+ val conf = new SparkConf()
+ conf.set("orc.compression", "snappy")
+
+ private val spark = SparkSession.builder()
+ .master("local[1]")
+ .appName("OrcReadBenchmark")
+ .config(conf)
+ .getOrCreate()
+
+ // Set default configs. Individual cases will change them if necessary.
+ spark.conf.set(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key, "true")
+
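+  // Creates a fresh temporary directory, runs `f` on it, and deletes it recursively afterwards.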
+ def withTempPath(f: File => Unit): Unit = {
+ val path = Utils.createTempDir()
+ path.delete()
+ try f(path) finally Utils.deleteRecursively(path)
+ }
+
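+  // Runs `f` and drops the listed temporary views afterwards.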
+ def withTempTable(tableNames: String*)(f: => Unit): Unit = {
+ try f finally tableNames.foreach(spark.catalog.dropTempView)
+ }
+
+ private val NATIVE_ORC_FORMAT = "org.apache.spark.sql.execution.datasources.orc.OrcFileFormat"
+ private val HIVE_ORC_FORMAT = "org.apache.spark.sql.hive.orc.OrcFileFormat"
+
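+  // Writes `df` to `dir` as ORC (optionally partitioned) and registers the result as
+  // "nativeOrcTable" and "hiveOrcTable" through the two data sources above.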
+ private def prepareTable(dir: File, df: DataFrame, partition: Option[String] = None): Unit = {
+ val dirORC = dir.getCanonicalPath
+
+ if (partition.isDefined) {
+ df.write.partitionBy(partition.get).orc(dirORC)
+ } else {
+ df.write.orc(dirORC)
+ }
+
+ spark.read.format(NATIVE_ORC_FORMAT).load(dirORC).createOrReplaceTempView("nativeOrcTable")
+ spark.read.format(HIVE_ORC_FORMAT).load(dirORC).createOrReplaceTempView("hiveOrcTable")
+ }
+
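+  // Scans and sums a single column of the given numeric type with both ORC data sources.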
+ def numericScanBenchmark(values: Int, dataType: DataType): Unit = {
+ val sqlBenchmark = new Benchmark(s"SQL Single ${dataType.sql} Column Scan", values)
+
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ import spark.implicits._
+ spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
+
+ prepareTable(dir, spark.sql(s"SELECT CAST(value AS ${dataType.sql}) id FROM t1"))
+
+ sqlBenchmark.addCase("Native ORC") { _ =>
+ spark.sql("SELECT sum(id) FROM nativeOrcTable").collect()
+ }
+
+ sqlBenchmark.addCase("Hive built-in ORC") { _ =>
+ spark.sql("SELECT sum(id) FROM hiveOrcTable").collect()
+ }
+
+ /*
+ Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.1
+ Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
+
+ SQL Single TINYINT Column Scan:          Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     132 /  138        119.4           8.4       1.0X
+ Hive built-in ORC                             1328 / 1333         11.8          84.5       0.1X
+
+ SQL Single SMALLINT Column Scan:         Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     178 /  188         88.2          11.3       1.0X
+ Hive built-in ORC                             1541 / 1560         10.2          98.0       0.1X
+
+ SQL Single INT Column Scan:              Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     242 /  256         64.9          15.4       1.0X
+ Hive built-in ORC                             1650 / 1676          9.5         104.9       0.1X
+
+ SQL Single BIGINT Column Scan:           Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     297 /  309         53.0          18.9       1.0X
+ Hive built-in ORC                             1750 / 1766          9.0         111.3       0.2X
+
+ SQL Single FLOAT Column Scan:            Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     352 /  363         44.7          22.4       1.0X
+ Hive built-in ORC                             1749 / 1764          9.0         111.2       0.2X
+
+ SQL Single DOUBLE Column Scan:           Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     436 /  456         36.1          27.7       1.0X
+ Hive built-in ORC                             1852 / 1860          8.5         117.8       0.2X
+ */
+ sqlBenchmark.run()
+ }
+ }
+ }
+
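+  // Scans one INT column and one STRING column together with both ORC data sources.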
+ def intStringScanBenchmark(values: Int): Unit = {
+ val benchmark = new Benchmark("Int and String Scan", values)
+
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ import spark.implicits._
+ spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
+
+ prepareTable(
+ dir,
+ spark.sql("SELECT CAST(value AS INT) AS c1, CAST(value as
STRING) AS c2 FROM t1"))
+
+ benchmark.addCase("Native ORC") { _ =>
+ spark.sql("SELECT sum(c1), sum(length(c2)) FROM
nativeOrcTable").collect()
+ }
+
+ benchmark.addCase("Hive built-in ORC") { _ =>
+ spark.sql("SELECT sum(c1), sum(length(c2)) FROM
hiveOrcTable").collect()
+ }
+
+ /*
+ Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.1
+ Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
+
+ Int and String Scan:                     Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                    1374 / 1376          7.6         131.0       1.0X
+ Hive built-in ORC                             3653 / 3664          2.9         348.4       0.4X
+ */
+ benchmark.run()
+ }
+ }
+ }
+
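+  // Compares reading the data column, the partition column, and both from a partitioned table.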
+ def partitionTableScanBenchmark(values: Int): Unit = {
+ val benchmark = new Benchmark("Partitioned Table", values)
+
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ import spark.implicits._
+ spark.range(values).map(_ => Random.nextLong).createOrReplaceTempView("t1")
+
+ prepareTable(dir, spark.sql("SELECT value % 2 AS p, value AS id FROM t1"), Some("p"))
+
+ benchmark.addCase("Read data column - Native ORC") { _ =>
+ spark.sql("SELECT sum(id) FROM nativeOrcTable").collect()
+ }
+
+ benchmark.addCase("Read data column - Hive built-in ORC") { _ =>
+ spark.sql("SELECT sum(id) FROM hiveOrcTable").collect()
+ }
+
+ benchmark.addCase("Read partition column - Native ORC") { _ =>
+ spark.sql("SELECT sum(p) FROM nativeOrcTable").collect()
+ }
+
+ benchmark.addCase("Read partition column - Hive built-in ORC") { _
=>
+ spark.sql("SELECT sum(p) FROM hiveOrcTable").collect()
+ }
+
+ benchmark.addCase("Read both columns - Native ORC") { _ =>
+ spark.sql("SELECT sum(p), sum(id) FROM nativeOrcTable").collect()
+ }
+
+ benchmark.addCase("Read both columns - Hive built-in ORC") { _ =>
+ spark.sql("SELECT sum(p), sum(id) FROM hiveOrcTable").collect()
+ }
+
+ /*
+ Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.1
+ Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
+
+ Partitioned Table:                       Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Read data column - Native ORC                  321 /  327         49.0          20.4       1.0X
+ Read data column - Hive built-in ORC          2041 / 2176          7.7         129.8       0.2X
+ Read partition column - Native ORC              53 /   57        298.2           3.4       6.1X
+ Read partition column - Hive built-in ORC     1176 / 1183         13.4          74.7       0.3X
+ Read both columns - Native ORC                 335 /  340         47.0          21.3       1.0X
+ Read both columns - Hive built-in ORC         1970 / 1974          8.0         125.2       0.2X
+ */
+ benchmark.run()
+ }
+ }
+ }
+
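+  // Scans a low-cardinality (200 distinct values) string column, which the ORC writer
+  // typically stores dictionary-encoded.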
+ def stringDictionaryScanBenchmark(values: Int): Unit = {
+ val benchmark = new Benchmark("String Dictionary", values)
+
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ spark.range(values).createOrReplaceTempView("t1")
+
+ prepareTable(dir, spark.sql("SELECT CAST((id % 200) + 10000 AS STRING) AS c1 FROM t1"))
+
+ benchmark.addCase("Native ORC") { _ =>
+ spark.sql("SELECT sum(length(c1)) FROM nativeOrcTable").collect()
+ }
+
+ benchmark.addCase("Hive built-in ORC") { _ =>
+ spark.sql("SELECT sum(length(c1)) FROM hiveOrcTable").collect()
+ }
+
+ /*
+ Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.1
+ Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
+
+ String Dictionary:                       Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC                                     363 /  382         28.9          34.7       1.0X
+ Hive built-in ORC                             2012 / 2080          5.2         191.9       0.2X
+ */
+ benchmark.run()
+ }
+ }
+ }
+
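+  // Scans two STRING columns where about `fractionOfNulls` of the values are NULL.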
+ def stringWithNullsScanBenchmark(values: Int, fractionOfNulls: Double): Unit = {
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ spark.range(values).createOrReplaceTempView("t1")
+
+ prepareTable(
+ dir,
+ spark.sql(
+ s"SELECT IF(RAND(1) < $fractionOfNulls, NULL, CAST(id as
STRING)) AS c1, " +
+ s"IF(RAND(2) < $fractionOfNulls, NULL, CAST(id as STRING)) AS
c2 FROM t1"))
+
+ val benchmark = new Benchmark("String with Nulls Scan", values)
+
+ benchmark.addCase(s"Native ORC ($fractionOfNulls%)") { iter =>
+ spark.sql("SELECT SUM(LENGTH(c2)) FROM nativeOrcTable " +
+ "WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
+ }
+
+ benchmark.addCase(s"Hive built-in ORC ($fractionOfNulls%)") { iter
=>
+ spark.sql("SELECT SUM(LENGTH(c2)) FROM hiveOrcTable " +
+ "WHERE c1 IS NOT NULL AND c2 IS NOT NULL").collect()
+ }
+
+ /*
+ Java HotSpot(TM) 64-Bit Server VM 1.8.0_152-b16 on Mac OS X 10.13.1
+ Intel(R) Core(TM) i7-4770HQ CPU @ 2.20GHz
+
+ String with Nulls Scan:                  Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC (0.0%)                             1120 / 1142          9.4         106.8       1.0X
+ Hive built-in ORC (0.0%)                      4232 / 4284          2.5         403.6       0.3X
+
+ String with Nulls Scan:                  Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC (0.5%)                             1474 / 1512          7.1         140.5       1.0X
+ Hive built-in ORC (0.5%)                      3114 / 3140          3.4         297.0       0.5X
+
+ String with Nulls Scan:                  Best/Avg Time(ms)    Rate(M/s)   Per Row(ns)   Relative
+ ------------------------------------------------------------------------------------------------
+ Native ORC (0.95%)                             568 /  589         18.5          54.1       1.0X
+ Hive built-in ORC (0.95%)                     1548 / 1549          6.8         147.6       0.4X
+ */
+ benchmark.run()
+ }
+ }
+ }
+
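+  // Scans a single column out of rows that are `width` columns wide.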
+ def columnsBenchmark(values: Int, width: Int): Unit = {
+ val sqlBenchmark = new Benchmark(s"SQL Single Column Scan FROM $width-Column Rows", values)
+
+ withTempPath { dir =>
+ withTempTable("t1", "nativeOrcTable", "hiveOrcTable") {
+ import spark.implicits._
+ val middle = width / 2
+ val selectExpr = (1 to width).map(i => s"value as c$i")
+ spark.range(values).map(_ => Random.nextLong).toDF()
+ .selectExpr(selectExpr: _*).createOrReplaceTempView("t1")
+
+ prepareTable(dir, spark.sql(s"SELECT * FROM t1"))
--- End diff --
Thanks. It's removed.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]