Repository: spark
Updated Branches:
  refs/heads/master bebd2e1ce -> af8a34c78


[SPARK-22159][SQL][FOLLOW-UP] Make config names consistently end with "enabled".

## What changes were proposed in this pull request?

This is a follow-up of #19384.

In the previous PR, only the definitions of the config names were modified; this follow-up also updates the names that are specified as string literals at runtime and in tests.
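
For example, callers that reference the Arrow config by string literal should now use the `enabled` suffix. A minimal PySpark sketch (the session bootstrap and sample data here are illustrative, not part of this patch):

```python
from pyspark.sql import SparkSession

# Illustrative local session; any existing SparkSession works the same way.
spark = SparkSession.builder.master("local[2]").appName("arrow-config").getOrCreate()

# The key now ends with "enabled"; the old "...arrow.enable" literal no longer
# matches the config definition renamed in #19384.
spark.conf.set("spark.sql.execution.arrow.enabled", "true")

df = spark.createDataFrame([(1, "Alice"), (2, "Bob")], ["id", "name"])
pdf = df.toPandas()  # takes the Arrow path when the config is "true" and pyarrow is installed
```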

## How was this patch tested?

Existing tests, run with the config names updated.
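
The toPandas toggle test in python/pyspark/sql/tests.py, for instance, flips the renamed key and checks that both conversion paths agree; roughly (a standalone sketch, with illustrative session and data):

```python
from pyspark.sql import SparkSession

# Standalone sketch; the session and sample data are illustrative.
spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([(1, "Alice"), (2, "Bob")], ["id", "name"])

spark.conf.set("spark.sql.execution.arrow.enabled", "false")
pdf = df.toPandas()            # plain collect-based conversion
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
pdf_arrow = df.toPandas()      # Arrow-based conversion (requires pyarrow)
assert pdf_arrow.equals(pdf)   # both code paths should yield the same frame
```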

Author: Takuya UESHIN <[email protected]>

Closes #19462 from ueshin/issues/SPARK-22159/fup1.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/af8a34c7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/af8a34c7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/af8a34c7

Branch: refs/heads/master
Commit: af8a34c787dc3d68f5148a7d9975b52650bb7729
Parents: bebd2e1
Author: Takuya UESHIN <[email protected]>
Authored: Mon Oct 9 22:35:34 2017 -0700
Committer: gatorsmile <[email protected]>
Committed: Mon Oct 9 22:35:34 2017 -0700

----------------------------------------------------------------------
 python/pyspark/sql/dataframe.py                 |  4 ++--
 python/pyspark/sql/tests.py                     |  6 +++---
 .../execution/aggregate/HashAggregateExec.scala |  2 +-
 .../spark/sql/AggregateHashMapSuite.scala       | 12 ++++++------
 .../benchmark/AggregateBenchmark.scala          | 20 ++++++++++----------
 .../hive/execution/AggregationQuerySuite.scala  |  2 +-
 6 files changed, 23 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/python/pyspark/sql/dataframe.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index b7ce9a8..fe69e58 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -1878,7 +1878,7 @@ class DataFrame(object):
         1    5    Bob
         """
         import pandas as pd
-        if self.sql_ctx.getConf("spark.sql.execution.arrow.enable", "false").lower() == "true":
+        if self.sql_ctx.getConf("spark.sql.execution.arrow.enabled", "false").lower() == "true":
             try:
                 import pyarrow
                 tables = self._collectAsArrow()
@@ -1889,7 +1889,7 @@ class DataFrame(object):
                     return pd.DataFrame.from_records([], columns=self.columns)
             except ImportError as e:
                 msg = "note: pyarrow must be installed and available on 
calling Python process " \
-                      "if using spark.sql.execution.arrow.enable=true"
+                      "if using spark.sql.execution.arrow.enabled=true"
                 raise ImportError("%s\n%s" % (e.message, msg))
         else:
            pdf = pd.DataFrame.from_records(self.collect(), columns=self.columns)

http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/python/pyspark/sql/tests.py
----------------------------------------------------------------------
diff --git a/python/pyspark/sql/tests.py b/python/pyspark/sql/tests.py
index 1b3af42..a59378b 100644
--- a/python/pyspark/sql/tests.py
+++ b/python/pyspark/sql/tests.py
@@ -3088,7 +3088,7 @@ class ArrowTests(ReusedPySparkTestCase):
     def setUpClass(cls):
         ReusedPySparkTestCase.setUpClass()
         cls.spark = SparkSession(cls.sc)
-        cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
+        cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
         cls.schema = StructType([
             StructField("1_str_t", StringType(), True),
             StructField("2_int_t", IntegerType(), True),
@@ -3120,9 +3120,9 @@ class ArrowTests(ReusedPySparkTestCase):
 
     def test_toPandas_arrow_toggle(self):
         df = self.spark.createDataFrame(self.data, schema=self.schema)
-        self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
+        self.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
         pdf = df.toPandas()
-        self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
+        self.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
         pdf_arrow = df.toPandas()
         self.assertFramesEqual(pdf_arrow, pdf)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala
index f424096..8b573fd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/HashAggregateExec.scala
@@ -539,7 +539,7 @@ case class HashAggregateExec(
   private def enableTwoLevelHashMap(ctx: CodegenContext) = {
     if (!checkIfFastHashMapSupported(ctx)) {
      if (modes.forall(mode => mode == Partial || mode == PartialMerge) && !Utils.isTesting) {
-        logInfo("spark.sql.codegen.aggregate.map.twolevel.enable is set to true, but"
+        logInfo("spark.sql.codegen.aggregate.map.twolevel.enabled is set to true, but"
          + " current version of codegened fast hashmap does not support this aggregate.")
       }
     } else {

http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala
index 7e61a68..938d76c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/AggregateHashMapSuite.scala
@@ -24,14 +24,14 @@ import org.apache.spark.SparkConf
class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
 
  // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "false",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "false",
       "configuration parameter changed in test body")
   }
 }
@@ -39,14 +39,14 @@ class SingleLevelAggregateHashMapSuite extends DataFrameAggregateSuite with Befo
 class TwoLevelAggregateHashMapSuite extends DataFrameAggregateSuite with BeforeAndAfter {
   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
 
  // adding some checking after each test is run, assuring that the configs are not changed
   // in test code
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "true",
       "configuration parameter changed in test body")
   }
 }
@@ -57,7 +57,7 @@ class TwoLevelAggregateHashMapWithVectorizedMapSuite
 
   override protected def sparkConf: SparkConf = super.sparkConf
     .set("spark.sql.codegen.fallback", "false")
-    .set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+    .set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
     .set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
 
  // adding some checking after each test is run, assuring that the configs are not changed
are not changed
@@ -65,7 +65,7 @@ class TwoLevelAggregateHashMapWithVectorizedMapSuite
   after {
     assert(sparkConf.get("spark.sql.codegen.fallback") == "false",
       "configuration parameter changed in test body")
-    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enable") == "true",
+    assert(sparkConf.get("spark.sql.codegen.aggregate.map.twolevel.enabled") == "true",
       "configuration parameter changed in test body")
    assert(sparkConf.get("spark.sql.codegen.aggregate.map.vectorized.enable") == "true",
       "configuration parameter changed in test body")

http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
index aca1be0..a834b7c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala
@@ -107,14 +107,14 @@ class AggregateBenchmark extends BenchmarkBase {
 
     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }
 
     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -149,14 +149,14 @@ class AggregateBenchmark extends BenchmarkBase {
 
     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }
 
     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -189,14 +189,14 @@ class AggregateBenchmark extends BenchmarkBase {
 
     benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }
 
     benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -228,14 +228,14 @@ class AggregateBenchmark extends BenchmarkBase {
 
     benchmark.addCase(s"codegen = T hashmap = F") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }
 
     benchmark.addCase(s"codegen = T hashmap = T") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }
@@ -277,14 +277,14 @@ class AggregateBenchmark extends BenchmarkBase {
 
     benchmark.addCase(s"codegen = T hashmap = F") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "false")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
       f()
     }
 
     benchmark.addCase(s"codegen = T hashmap = T") { iter =>
       sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
-      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enable", "true")
+      sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
       sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
       f()
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/af8a34c7/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
index f245a79..ae67514 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/AggregationQuerySuite.scala
@@ -1015,7 +1015,7 @@ class HashAggregationQueryWithControlledFallbackSuite extends AggregationQuerySu
 
  override protected def checkAnswer(actual: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
     Seq("true", "false").foreach { enableTwoLevelMaps =>
-      withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enable" ->
+      withSQLConf("spark.sql.codegen.aggregate.map.twolevel.enabled" ->
         enableTwoLevelMaps) {
         (1 to 3).foreach { fallbackStartsAt =>
           withSQLConf("spark.sql.TungstenAggregate.testFallbackStartsAt" ->

