This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 447bfdec8 [SPARK-27844][SQL] Avoid hard-coded config: spark.rdd.parallelListingThreshold in SQL module
447bfdec8 is described below

commit 447bfdec830ba5eaaee791e86caad39f4f6661eb
Author: Yuming Wang <[email protected]>
AuthorDate: Sun May 26 09:00:01 2019 -0700

    [SPARK-27844][SQL] Avoid hard-coded config: spark.rdd.parallelListingThreshold in SQL module
    
    ## What changes were proposed in this pull request?
    
    Avoid hard-coded config: `spark.rdd.parallelListingThreshold`.
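    
    The fix replaces the raw string lookup with the typed config entry
    `RDD_PARALLEL_LISTING_THRESHOLD`. For context, a rough sketch of how such
    an entry is declared with `ConfigBuilder` in
    `org.apache.spark.internal.config` (the default of 10 mirrors the old
    hard-coded fallback; the exact declaration in Spark may differ slightly):
    
    ```scala
    private[spark] val RDD_PARALLEL_LISTING_THRESHOLD =
      ConfigBuilder("spark.rdd.parallelListingThreshold")
        .intConf
        .createWithDefault(10)
    ```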
    
    ## How was this patch tested?
    
    N/A
    
    Closes #24708 from wangyum/spark.rdd.parallelListingThreshold.
    
    Authored-by: Yuming Wang <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 .../src/main/scala/org/apache/spark/sql/execution/command/ddl.scala  | 3 ++-
 .../test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
index 1d3cc88..ee5d37c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs._
 import org.apache.hadoop.mapred.{FileInputFormat, JobConf}
 
+import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD
 import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.Resolver
@@ -613,7 +614,7 @@ case class AlterTableRecoverPartitionsCommand(
     val hadoopConf = spark.sessionState.newHadoopConf()
     val fs = root.getFileSystem(hadoopConf)
 
-    val threshold = spark.conf.get("spark.rdd.parallelListingThreshold", "10").toInt
+    val threshold = spark.sparkContext.conf.get(RDD_PARALLEL_LISTING_THRESHOLD)
     val pathFilter = getPathFilter(hadoopConf)
 
     val evalPool = ThreadUtils.newForkJoinPool("AlterTableRecoverPartitionsCommand", 8)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
index ba39dcf..4859bde 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLSuite.scala
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.Path
 import org.scalatest.BeforeAndAfterEach
 
 import org.apache.spark.internal.config
+import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD
 import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.{FunctionRegistry, NoSuchPartitionException, NoSuchTableException, TempTableAlreadyExistsException}
@@ -1122,13 +1123,13 @@ abstract class DDLSuite extends QueryTest with SQLTestUtils {
   }
 
   test("alter table: recover partitions (sequential)") {
-    withSQLConf("spark.rdd.parallelListingThreshold" -> "10") {
+    withSQLConf(RDD_PARALLEL_LISTING_THRESHOLD.key -> "10") {
       testRecoverPartitions()
     }
   }
 
   test("alter table: recover partition (parallel)") {
-    withSQLConf("spark.rdd.parallelListingThreshold" -> "0") {
+    withSQLConf(RDD_PARALLEL_LISTING_THRESHOLD.key -> "0") {
       testRecoverPartitions()
     }
   }
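
For reference, a minimal sketch of the two access patterns the diff switches
between, as used inside Spark's own code where the typed SparkConf getter is
visible; `spark` here is an illustrative SparkSession:

```scala
import org.apache.spark.internal.config.RDD_PARALLEL_LISTING_THRESHOLD

// Typed read: returns an Int and falls back to the entry's declared
// default, so no string literal or manual .toInt is needed.
val threshold: Int = spark.sparkContext.conf.get(RDD_PARALLEL_LISTING_THRESHOLD)

// The entry still carries its string name for name-keyed APIs such as
// the withSQLConf test helper used in DDLSuite.
val name: String = RDD_PARALLEL_LISTING_THRESHOLD.key  // "spark.rdd.parallelListingThreshold"
```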

