This is an automated email from the ASF dual-hosted git repository.

yamamuro pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 3aa659c  [MINOR][SQL][TESTS] Disable UI in SQL benchmarks by default
3aa659c is described below

commit 3aa659ce29877f386a24da9d04e66069d04afaa8
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Sat May 2 17:54:36 2020 +0900

    [MINOR][SQL][TESTS] Disable UI in SQL benchmarks by default
    
    ### What changes were proposed in this pull request?
    Set `spark.ui.enabled` to `false` in `SqlBasedBenchmark.getSparkSession`. 
This disables UI in all SQL benchmarks by default.
    
    ### Why are the changes needed?
    UI overhead lowers numbers in the `Relative` column and impacts `Stdev` 
in benchmark results.
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Checked by running `DateTimeRebaseBenchmark`.
    
    Closes #28432 from MaxGekk/ui-off-in-benchmarks.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Takeshi Yamamuro <yamam...@apache.org>
    (cherry picked from commit 13dddee9a8490ead00ff00bd741db4a170dfd759)
    Signed-off-by: Takeshi Yamamuro <yamam...@apache.org>
---
 .../apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala  | 2 --
 .../apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala  | 2 --
 .../org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala    | 2 ++
 3 files changed, 2 insertions(+), 4 deletions(-)

diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
index d29c5e3..0fc43c7 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/DataSourceReadBenchmark.scala
@@ -23,7 +23,6 @@ import scala.util.Random
 
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
-import org.apache.spark.internal.config.UI._
 import org.apache.spark.sql.{DataFrame, DataFrameWriter, Row, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import 
org.apache.spark.sql.execution.datasources.parquet.{SpecificParquetRecordReaderBase,
 VectorizedParquetRecordReader}
@@ -52,7 +51,6 @@ object DataSourceReadBenchmark extends SqlBasedBenchmark {
       .set("spark.master", "local[1]")
       .setIfMissing("spark.driver.memory", "3g")
       .setIfMissing("spark.executor.memory", "3g")
-      .setIfMissing(UI_ENABLED, false)
 
     val sparkSession = SparkSession.builder.config(conf).getOrCreate()
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
index 444ffa4..b3f65d4 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala
@@ -23,7 +23,6 @@ import scala.util.Random
 
 import org.apache.spark.SparkConf
 import org.apache.spark.benchmark.Benchmark
-import org.apache.spark.internal.config.UI._
 import org.apache.spark.sql.{DataFrame, SparkSession}
 import org.apache.spark.sql.functions.monotonically_increasing_id
 import org.apache.spark.sql.internal.SQLConf
@@ -49,7 +48,6 @@ object FilterPushdownBenchmark extends SqlBasedBenchmark {
       .set("spark.master", "local[1]")
       .setIfMissing("spark.driver.memory", "3g")
       .setIfMissing("spark.executor.memory", "3g")
-      .setIfMissing(UI_ENABLED, false)
       .setIfMissing("orc.compression", "snappy")
       .setIfMissing("spark.sql.parquet.compression.codec", "snappy")
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala
index ee7a03e..28387dc 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/SqlBasedBenchmark.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.execution.benchmark
 
 import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
+import org.apache.spark.internal.config.UI.UI_ENABLED
 import org.apache.spark.sql.{Dataset, SparkSession}
 import org.apache.spark.sql.SaveMode.Overwrite
 import org.apache.spark.sql.catalyst.plans.SQLHelper
@@ -37,6 +38,7 @@ trait SqlBasedBenchmark extends BenchmarkBase with SQLHelper {
       .appName(this.getClass.getCanonicalName)
       .config(SQLConf.SHUFFLE_PARTITIONS.key, 1)
       .config(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key, 1)
+      .config(UI_ENABLED.key, false)
       .getOrCreate()
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to