This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit b889e66a6cbc8642485fa4bda8a1e6e34ba1c621
Author: beliefer <[email protected]>
AuthorDate: Thu Mar 12 09:52:20 2020 +0900

    [SPARK-31002][CORE][DOC][FOLLOWUP] Add version information to the configuration of Core
    
    ### What changes were proposed in this pull request?
    This PR follows up https://github.com/apache/spark/pull/27847.
    I sorted out the information shown in the table below.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.yarn.isPython | 1.5.0 | SPARK-5479 | 38112905bc3b33f2ae75274afba1c30e116f6e46#diff-4d2ab44195558d5a9d5f15b8803ef39d |
    spark.task.cpus | 0.5.0 | None | e5c4cd8a5e188592f8786a265c0cd073c69ac886#diff-391214d132a0fb4478f4f9c2313d8966 |
    spark.dynamicAllocation.enabled | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.testing | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.minExecutors | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.initialExecutors | 1.3.0 | SPARK-4585 | b2047b55c5fc85de6b63276d8ab9610d2496e08b#diff-b096353602813e47074ace09a3890d56 |
    spark.dynamicAllocation.maxExecutors | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.executorAllocationRatio | 2.4.0 | SPARK-22683 | 55c4ca88a3b093ee197a8689631be8d1fac1f10f#diff-6bdad48cfc34314e89599655442ff210 |
    spark.dynamicAllocation.cachedExecutorIdleTimeout | 1.4.0 | SPARK-7955 | 6faaf15ba311bc3a79aae40a6c9c4befabb6889f#diff-b096353602813e47074ace09a3890d56 |
    spark.dynamicAllocation.executorIdleTimeout | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.shuffleTracking.enabled | 3.0.0 | SPARK-27963 | 2ddeff97d7329942a98ef363991eeabc3fa71a76#diff-6bdad48cfc34314e89599655442ff210 |
    spark.dynamicAllocation.shuffleTimeout | 3.0.0 | SPARK-27963 | 2ddeff97d7329942a98ef363991eeabc3fa71a76#diff-6bdad48cfc34314e89599655442ff210 |
    spark.dynamicAllocation.schedulerBacklogTimeout | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.dynamicAllocation.sustainedSchedulerBacklogTimeout | 1.2.0 | SPARK-3795 | 8d59b37b02eb36f37bcefafb952519d7dca744ad#diff-364713d7776956cb8b0a771e9b62f82d |
    spark.locality.wait | 0.5.0 | None | e5c4cd8a5e188592f8786a265c0cd073c69ac886#diff-391214d132a0fb4478f4f9c2313d8966 |
    spark.shuffle.service.enabled | 1.2.0 | SPARK-3796 | f55218aeb1e9d638df6229b36a59a15ce5363482#diff-2b643ea78c1add0381754b1f47eec132 |
    Constants.SHUFFLE_SERVICE_FETCH_RDD_ENABLED | 3.0.0 | SPARK-27677 | e9f3f62b2c0f521f3cc23fef381fc6754853ad4f#diff-6bdad48cfc34314e89599655442ff210 | spark.shuffle.service.fetch.rdd.enabled
    spark.shuffle.service.db.enabled | 3.0.0 | SPARK-26288 | 8b0aa59218c209d39cbba5959302d8668b885cf6#diff-6bdad48cfc34314e89599655442ff210 |
    spark.shuffle.service.port | 1.2.0 | SPARK-3796 | f55218aeb1e9d638df6229b36a59a15ce5363482#diff-2b643ea78c1add0381754b1f47eec132 |
    spark.kerberos.keytab | 3.0.0 | SPARK-25372 | 51540c2fa677658be954c820bc18ba748e4c8583#diff-6bdad48cfc34314e89599655442ff210 |
    spark.kerberos.principal | 3.0.0 | SPARK-25372 | 51540c2fa677658be954c820bc18ba748e4c8583#diff-6bdad48cfc34314e89599655442ff210 |
    spark.kerberos.relogin.period | 3.0.0 | SPARK-23781 | 68dde3481ea458b0b8deeec2f99233c2d4c1e056#diff-6bdad48cfc34314e89599655442ff210 |
    spark.kerberos.renewal.credentials | 3.0.0 | SPARK-26595 | 2a67dbfbd341af166b1c85904875f26a6dea5ba8#diff-6bdad48cfc34314e89599655442ff210 |
    spark.kerberos.access.hadoopFileSystems | 3.0.0 | SPARK-26766 | d0443a74d185ec72b747fa39994fa9a40ce974cf#diff-6bdad48cfc34314e89599655442ff210 |
    spark.executor.instances | 1.0.0 | SPARK-1126 | 1617816090e7b20124a512a43860a21232ebf511#diff-4d2ab44195558d5a9d5f15b8803ef39d |
    spark.yarn.dist.pyFiles | 2.2.1 | SPARK-21714 | d10c9dc3f631a26dbbbd8f5c601ca2001a5d7c80#diff-6bdad48cfc34314e89599655442ff210 |
    spark.task.maxDirectResultSize | 2.0.0 | SPARK-13830 | 2ef4c5963bff3574fe17e669d703b25ddd064e5d#diff-5a0de266c82b95adb47d9bca714e1f1b |
    spark.task.maxFailures | 0.8.0 | None | 46eecd110a4017ea0c86cbb1010d0ccd6a5eb2ef#diff-264da78fe625d594eae59d1adabc8ae9 |
    spark.task.reaper.enabled | 2.0.3 | SPARK-18761 | 678d91c1d2283d9965a39656af9d383bad093ba8#diff-5a0de266c82b95adb47d9bca714e1f1b |
    spark.task.reaper.killTimeout | 2.0.3 | SPARK-18761 | 678d91c1d2283d9965a39656af9d383bad093ba8#diff-5a0de266c82b95adb47d9bca714e1f1b |
    spark.task.reaper.pollingInterval | 2.0.3 | SPARK-18761 | 678d91c1d2283d9965a39656af9d383bad093ba8#diff-5a0de266c82b95adb47d9bca714e1f1b |
    spark.task.reaper.threadDump | 2.0.3 | SPARK-18761 | 678d91c1d2283d9965a39656af9d383bad093ba8#diff-5a0de266c82b95adb47d9bca714e1f1b |
    spark.blacklist.enabled | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.task.maxTaskAttemptsPerExecutor | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.task.maxTaskAttemptsPerNode | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.application.maxFailedTasksPerExecutor | 2.2.0 | SPARK-8425 | 93cdb8a7d0f124b4db069fd8242207c82e263c52#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.stage.maxFailedTasksPerExecutor | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.application.maxFailedExecutorsPerNode | 2.2.0 | SPARK-8425 | 93cdb8a7d0f124b4db069fd8242207c82e263c52#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.stage.maxFailedExecutorsPerNode | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.timeout | 2.1.0 | SPARK-17675 | 9ce7d3e542e786c62f047c13f3001e178f76e06a#diff-6bdad48cfc34314e89599655442ff210 |
    spark.blacklist.killBlacklistedExecutors | 2.2.0 | SPARK-16554 | 6287c94f08200d548df5cc0a401b73b84f9968c4#diff-6bdad48cfc34314e89599655442ff210 |
    spark.scheduler.executorTaskBlacklistTime | 1.0.0 | None | ab747d39ddc7c8a314ed2fb26548fc5652af0d74#diff-bad3987c83bd22d46416d3dd9d208e76 |
    spark.blacklist.application.fetchFailure.enabled | 2.3.0 | SPARK-13669 and SPARK-20898 | 9e50a1d37a4cf0c34e20a7c1a910ceaff41535a2#diff-6bdad48cfc34314e89599655442ff210 |
    spark.files.fetchFailure.unRegisterOutputOnHost | 2.3.0 | SPARK-19753 | dccc0aa3cf957c8eceac598ac81ac82f03b52105#diff-6bdad48cfc34314e89599655442ff210 |
    spark.scheduler.listenerbus.eventqueue.capacity | 2.3.0 | SPARK-20887 | 629f38e171409da614fd635bd8dd951b7fde17a4#diff-6bdad48cfc34314e89599655442ff210 |
    spark.scheduler.listenerbus.metrics.maxListenerClassesTimed | 2.3.0 | SPARK-20863 | 2a23cdd078a7409d0bb92cf27718995766c41b1d#diff-6bdad48cfc34314e89599655442ff210 |
    spark.scheduler.listenerbus.logSlowEvent | 3.0.0 | SPARK-30812 | 68d7edf9497bea2f73707d32ab55dd8e53088e7c#diff-6bdad48cfc34314e89599655442ff210 |
    spark.scheduler.listenerbus.logSlowEvent.threshold | 3.0.0 | SPARK-29001 | 0346afa8fc348aa1b3f5110df747a64e3b2da388#diff-6bdad48cfc34314e89599655442ff210 |
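
    For reference, each row above corresponds to a `.version()` call added in core/src/main/scala/org/apache/spark/internal/config/package.scala. A representative entry, taken from this diff (the version string comes from the table):

    ```scala
    private[spark] val DYN_ALLOCATION_ENABLED =
      ConfigBuilder("spark.dynamicAllocation.enabled")
        .version("1.2.0")  // "Since version" from the table above (SPARK-3795)
        .booleanConf
        .createWithDefault(false)
    ```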
    
    ### Why are the changes needed?
    Supplies the missing version information for these core configuration entries.
    
    ### Does this PR introduce any user-facing change?
    No
    
    ### How was this patch tested?
    Existing unit tests.
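
    As a minimal sketch, such a test can assert the value recorded by `.version()` against the table above. This assumes the `version` accessor on config entries introduced by this series; the snippet is illustrative, not the exact suite code:

    ```scala
    import org.apache.spark.internal.config._

    // Each entry should report the release in which it first appeared.
    assert(DYN_ALLOCATION_ENABLED.version == "1.2.0")
    assert(CPUS_PER_TASK.version == "0.5.0")
    ```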
    
    Closes #27852 from beliefer/add-version-to-core-config-part-two.
    
    Authored-by: beliefer <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
---
 .../org/apache/spark/internal/config/package.scala | 84 +++++++++++++++++++---
 docs/configuration.md                              | 31 ++++++++
 2 files changed, 104 insertions(+), 11 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index f3195d9..12ae56c 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -461,68 +461,98 @@ package object config {
       .intConf
       .createWithDefault(5)
 
-  private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
-    .booleanConf.createWithDefault(false)
+  private[spark] val IS_PYTHON_APP =
+    ConfigBuilder("spark.yarn.isPython")
+      .internal()
+      .version("1.5.0")
+      .booleanConf
+      .createWithDefault(false)
 
-  private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
+  private[spark] val CPUS_PER_TASK =
+    ConfigBuilder("spark.task.cpus").version("0.5.0").intConf.createWithDefault(1)
 
   private[spark] val DYN_ALLOCATION_ENABLED =
-    ConfigBuilder("spark.dynamicAllocation.enabled").booleanConf.createWithDefault(false)
+    ConfigBuilder("spark.dynamicAllocation.enabled")
+      .version("1.2.0")
+      .booleanConf
+      .createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_TESTING =
-    ConfigBuilder("spark.dynamicAllocation.testing").booleanConf.createWithDefault(false)
+    ConfigBuilder("spark.dynamicAllocation.testing")
+      .version("1.2.0")
+      .booleanConf
+      .createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
-    ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
+    ConfigBuilder("spark.dynamicAllocation.minExecutors")
+      .version("1.2.0")
+      .intConf
+      .createWithDefault(0)
 
   private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
     ConfigBuilder("spark.dynamicAllocation.initialExecutors")
+      .version("1.3.0")
       .fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
 
   private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
-    ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
+    ConfigBuilder("spark.dynamicAllocation.maxExecutors")
+      .version("1.2.0")
+      .intConf
+      .createWithDefault(Int.MaxValue)
 
   private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
     ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
-      .doubleConf.createWithDefault(1.0)
+      .version("2.4.0")
+      .doubleConf
+      .createWithDefault(1.0)
 
   private[spark] val DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.cachedExecutorIdleTimeout")
+      .version("1.4.0")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(Integer.MAX_VALUE)
 
   private[spark] val DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.executorIdleTimeout")
+      .version("1.2.0")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(60)
 
   private[spark] val DYN_ALLOCATION_SHUFFLE_TRACKING =
     ConfigBuilder("spark.dynamicAllocation.shuffleTracking.enabled")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_SHUFFLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.shuffleTimeout")
+      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(Long.MaxValue)
 
   private[spark] val DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.schedulerBacklogTimeout")
+      .version("1.2.0")
       .timeConf(TimeUnit.SECONDS).createWithDefault(1)
 
   private[spark] val DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout")
+      .version("1.2.0")
       .fallbackConf(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
 
   private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
+    .version("0.5.0")
     .timeConf(TimeUnit.MILLISECONDS)
     .createWithDefaultString("3s")
 
   private[spark] val SHUFFLE_SERVICE_ENABLED =
-    ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
+    ConfigBuilder("spark.shuffle.service.enabled")
+      .version("1.2.0")
+      .booleanConf
+      .createWithDefault(false)
 
   private[spark] val SHUFFLE_SERVICE_FETCH_RDD_ENABLED =
     ConfigBuilder(Constants.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)
@@ -530,6 +560,7 @@ package object config {
         "In case of dynamic allocation if this feature is enabled executors 
having only disk " +
         "persisted blocks are considered idle after " +
         "'spark.dynamicAllocation.executorIdleTimeout' and will be released 
accordingly.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -537,21 +568,26 @@ package object config {
     ConfigBuilder("spark.shuffle.service.db.enabled")
       .doc("Whether to use db in ExternalShuffleService. Note that this only 
affects " +
         "standalone mode.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val SHUFFLE_SERVICE_PORT =
-    ConfigBuilder("spark.shuffle.service.port").intConf.createWithDefault(7337)
+    ConfigBuilder("spark.shuffle.service.port").version("1.2.0").intConf.createWithDefault(7337)
 
   private[spark] val KEYTAB = ConfigBuilder("spark.kerberos.keytab")
     .doc("Location of user's keytab.")
+    .version("3.0.0")
     .stringConf.createOptional
 
   private[spark] val PRINCIPAL = ConfigBuilder("spark.kerberos.principal")
     .doc("Name of the Kerberos principal.")
-    .stringConf.createOptional
+    .version("3.0.0")
+    .stringConf
+    .createOptional
 
  private[spark] val KERBEROS_RELOGIN_PERIOD = ConfigBuilder("spark.kerberos.relogin.period")
+    .version("3.0.0")
     .timeConf(TimeUnit.SECONDS)
     .createWithDefaultString("1m")
 
@@ -561,6 +597,7 @@ package object config {
         "Which credentials to use when renewing delegation tokens for 
executors. Can be either " +
         "'keytab', the default, which requires a keytab to be provided, or 
'ccache', which uses " +
         "the local credentials cache.")
+      .version("3.0.0")
       .stringConf
       .checkValues(Set("keytab", "ccache"))
       .createWithDefault("keytab")
@@ -569,104 +606,124 @@ package object config {
     ConfigBuilder("spark.kerberos.access.hadoopFileSystems")
     .doc("Extra Hadoop filesystem URLs for which to request delegation tokens. 
The filesystem " +
       "that hosts fs.defaultFS does not need to be listed here.")
+    .version("3.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
  private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
+    .version("1.0.0")
     .intConf
     .createOptional
 
   private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
     .internal()
+    .version("2.2.1")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val TASK_MAX_DIRECT_RESULT_SIZE =
     ConfigBuilder("spark.task.maxDirectResultSize")
+      .version("2.0.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(1L << 20)
 
   private[spark] val TASK_MAX_FAILURES =
     ConfigBuilder("spark.task.maxFailures")
+      .version("0.8.0")
       .intConf
       .createWithDefault(4)
 
   private[spark] val TASK_REAPER_ENABLED =
     ConfigBuilder("spark.task.reaper.enabled")
+      .version("2.0.3")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val TASK_REAPER_KILL_TIMEOUT =
     ConfigBuilder("spark.task.reaper.killTimeout")
+      .version("2.0.3")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(-1)
 
   private[spark] val TASK_REAPER_POLLING_INTERVAL =
     ConfigBuilder("spark.task.reaper.pollingInterval")
+      .version("2.0.3")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("10s")
 
   private[spark] val TASK_REAPER_THREAD_DUMP =
     ConfigBuilder("spark.task.reaper.threadDump")
+      .version("2.0.3")
       .booleanConf
       .createWithDefault(true)
 
   // Blacklist confs
   private[spark] val BLACKLIST_ENABLED =
     ConfigBuilder("spark.blacklist.enabled")
+      .version("2.1.0")
       .booleanConf
       .createOptional
 
   private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
     ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
+      .version("2.1.0")
       .intConf
       .createWithDefault(1)
 
   private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
     ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
+      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILURES_PER_EXEC =
     ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
+      .version("2.2.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
     ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
+      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILED_EXEC_PER_NODE =
     ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
+      .version("2.2.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
     ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
+      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val BLACKLIST_TIMEOUT_CONF =
     ConfigBuilder("spark.blacklist.timeout")
+      .version("2.1.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val BLACKLIST_KILL_ENABLED =
     ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
+      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
     ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
       .internal()
+      .version("1.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED =
     ConfigBuilder("spark.blacklist.application.fetchFailure.enabled")
+      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
   // End blacklist confs
@@ -676,6 +733,7 @@ package object config {
       .doc("Whether to un-register all the outputs on the host in condition 
that we receive " +
         " a FetchFailure. This is set default to false, which means, we only 
un-register the " +
         " outputs related to the exact executor(instead of the host) on a 
FetchFailure.")
+      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -685,6 +743,7 @@ package object config {
         "an event queue using capacity specified by 
`spark.scheduler.listenerbus" +
         ".eventqueue.queueName.capacity` first. If it's not configured, Spark 
will " +
         "use the default capacity specified by this config.")
+      .version("2.3.0")
       .intConf
      .checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
       .createWithDefault(10000)
@@ -692,6 +751,7 @@ package object config {
  private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
    ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
       .internal()
+      .version("2.3.0")
       .intConf
       .createWithDefault(128)
 
@@ -701,6 +761,7 @@ package object config {
       .doc("When enabled, log the event that takes too much time to process. 
This helps us " +
         "discover the event types that cause performance bottlenecks. The time 
threshold is " +
         "controlled by spark.scheduler.listenerbus.logSlowEvent.threshold.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -709,6 +770,7 @@ package object config {
       .internal()
       .doc("The time threshold of whether a event is considered to be taking 
too much time to " +
         s"process. Log the event if ${LISTENER_BUS_LOG_SLOW_EVENT_ENABLED.key} 
is true.")
+      .version("3.0.0")
       .timeConf(TimeUnit.NANOSECONDS)
       .createWithDefaultString("1s")
 
diff --git a/docs/configuration.md b/docs/configuration.md
index 4f3f542..4f601c8 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -877,6 +877,7 @@ Apart from these, the following properties are also available, and may be useful
     <a href="job-scheduling.html#configuration-and-setup">dynamic allocation
     configuration and setup documentation</a> for more information.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.shuffle.service.port</code></td>
@@ -884,6 +885,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Port on which the external shuffle service will run.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.shuffle.service.index.cache.size</code></td>
@@ -1916,6 +1918,7 @@ Apart from these, the following properties are also available, and may be useful
    You should increase this setting if your tasks are long and see poor locality, but the
     default usually works well.
   </td>
+  <td>0.5.0</td>
 </tr>
 <tr>
   <td><code>spark.locality.wait.node</code></td>
@@ -1987,6 +1990,7 @@ Apart from these, the following properties are also available, and may be useful
    config. Note that capacity must be greater than 0. Consider increasing value (e.g. 20000)
    if listener events are dropped. Increasing this value may result in the driver using more memory.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.scheduler.listenerbus.eventqueue.shared.capacity</code></td>
@@ -2051,6 +2055,7 @@ Apart from these, the following properties are also available, and may be useful
    due to too many task failures. The blacklisting algorithm can be further controlled by the
     other "spark.blacklist" configuration options.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.timeout</code></td>
@@ -2059,6 +2064,7 @@ Apart from these, the following properties are also available, and may be useful
    (Experimental) How long a node or executor is blacklisted for the entire application, before it
     is unconditionally removed from the blacklist to attempt running new tasks.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.task.maxTaskAttemptsPerExecutor</code></td>
@@ -2067,6 +2073,7 @@ Apart from these, the following properties are also available, and may be useful
    (Experimental) For a given task, how many times it can be retried on one executor before the
     executor is blacklisted for that task.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.task.maxTaskAttemptsPerNode</code></td>
@@ -2075,6 +2082,7 @@ Apart from these, the following properties are also available, and may be useful
    (Experimental) For a given task, how many times it can be retried on one node, before the entire
     node is blacklisted for that task.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.stage.maxFailedTasksPerExecutor</code></td>
@@ -2083,6 +2091,7 @@ Apart from these, the following properties are also available, and may be useful
    (Experimental) How many different tasks must fail on one executor, within one stage, before the
     executor is blacklisted for that stage.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.stage.maxFailedExecutorsPerNode</code></td>
@@ -2091,6 +2100,7 @@ Apart from these, the following properties are also available, and may be useful
    (Experimental) How many different executors are marked as blacklisted for a given stage, before
     the entire node is marked as failed for the stage.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.application.maxFailedTasksPerExecutor</code></td>
@@ -2102,6 +2112,7 @@ Apart from these, the following properties are also available, and may be useful
    <code>spark.blacklist.timeout</code>.  Note that with dynamic allocation, though, the executors
     may get marked as idle and be reclaimed by the cluster manager.
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.application.maxFailedExecutorsPerNode</code></td>
@@ -2113,6 +2124,7 @@ Apart from these, the following properties are also available, and may be useful
    <code>spark.blacklist.timeout</code>.  Note that with dynamic allocation, though, the executors
     on the node may get marked as idle and be reclaimed by the cluster manager.
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.killBlacklistedExecutors</code></td>
@@ -2123,6 +2135,7 @@ Apart from these, the following properties are also available, and may be useful
    as controlled by spark.blacklist.application.*. Note that, when an entire node is added
     to the blacklist, all of the executors on that node will be killed.
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.blacklist.application.fetchFailure.enabled</code></td>
@@ -2132,6 +2145,7 @@ Apart from these, the following properties are also available, and may be useful
    failure happens. If external shuffle service is enabled, then the whole node will be
     blacklisted.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.speculation</code></td>
@@ -2182,6 +2196,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Number of cores to allocate for each task.
   </td>
+  <td>0.5.0</td>
 </tr>
 <tr>
   <td><code>spark.task.resource.{resourceName}.amount</code></td>
@@ -2207,6 +2222,7 @@ Apart from these, the following properties are also available, and may be useful
     to fail; a particular task has to fail this number of attempts.
     Should be greater than or equal to 1. Number of allowed retries = this 
value - 1.
   </td>
+  <td>0.8.0</td>
 </tr>
 <tr>
   <td><code>spark.task.reaper.enabled</code></td>
@@ -2218,6 +2234,7 @@ Apart from these, the following properties are also available, and may be useful
    of this monitoring. When set to false (the default), task killing will use an older code
     path which lacks such monitoring.
   </td>
+  <td>2.0.3</td>
 </tr>
 <tr>
   <td><code>spark.task.reaper.pollingInterval</code></td>
@@ -2229,6 +2246,7 @@ Apart from these, the following properties are also available, and may be useful
    (this thread dump can be disabled via the <code>spark.task.reaper.threadDump</code> setting,
     which is documented below).
   </td>
+  <td>2.0.3</td>
 </tr>
 <tr>
   <td><code>spark.task.reaper.threadDump</code></td>
@@ -2238,6 +2256,7 @@ Apart from these, the following properties are also available, and may be useful
    dumps are logged during periodic polling of killed tasks. Set this to false to disable
     collection of thread dumps.
   </td>
+  <td>2.0.3</td>
 </tr>
 <tr>
   <td><code>spark.task.reaper.killTimeout</code></td>
@@ -2249,6 +2268,7 @@ Apart from these, the following properties are also available, and may be useful
    of this setting is to act as a safety-net to prevent runaway noncancellable tasks from rendering
     an executor unusable.
   </td>
+  <td>2.0.3</td>
 </tr>
 <tr>
   <td><code>spark.stage.maxConsecutiveAttempts</code></td>
@@ -2324,6 +2344,7 @@ Apart from these, the following properties are also available, and may be useful
     <code>spark.dynamicAllocation.initialExecutors</code>
     <code>spark.dynamicAllocation.executorAllocationRatio</code>
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.executorIdleTimeout</code></td>
@@ -2333,6 +2354,7 @@ Apart from these, the following properties are also available, and may be useful
     the executor will be removed. For more detail, see this
     <a href="job-scheduling.html#resource-allocation-policy">description</a>.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.cachedExecutorIdleTimeout</code></td>
@@ -2342,6 +2364,7 @@ Apart from these, the following properties are also available, and may be useful
     the executor will be removed. For more details, see this
     <a href="job-scheduling.html#resource-allocation-policy">description</a>.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.initialExecutors</code></td>
@@ -2352,6 +2375,7 @@ Apart from these, the following properties are also available, and may be useful
    If `--num-executors` (or `spark.executor.instances`) is set and larger than this value, it will
     be used as the initial number of executors.
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.maxExecutors</code></td>
@@ -2359,6 +2383,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Upper bound for the number of executors if dynamic allocation is enabled.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.minExecutors</code></td>
@@ -2366,6 +2391,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Lower bound for the number of executors if dynamic allocation is enabled.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.executorAllocationRatio</code></td>
@@ -2383,6 +2409,7 @@ Apart from these, the following properties are also available, and may be useful
     by the <code>spark.dynamicAllocation.minExecutors</code> and
     <code>spark.dynamicAllocation.maxExecutors</code> settings
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.schedulerBacklogTimeout</code></td>
@@ -2392,6 +2419,7 @@ Apart from these, the following properties are also available, and may be useful
     this duration, new executors will be requested. For more detail, see this
     <a href="job-scheduling.html#resource-allocation-policy">description</a>.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
  <td><code>spark.dynamicAllocation.sustainedSchedulerBacklogTimeout</code></td>
@@ -2401,6 +2429,7 @@ Apart from these, the following properties are also available, and may be useful
     subsequent executor requests. For more detail, see this
     <a href="job-scheduling.html#resource-allocation-policy">description</a>.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.shuffleTracking.enabled</code></td>
@@ -2410,6 +2439,7 @@ Apart from these, the following properties are also available, and may be useful
    without the need for an external shuffle service. This option will try to keep alive executors
     that are storing shuffle data for active jobs.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.dynamicAllocation.shuffleTimeout</code></td>
@@ -2421,6 +2451,7 @@ Apart from these, the following properties are also available, and may be useful
    quickly enough, this option can be used to control when to time out executors even when they are
     storing shuffle data.
   </td>
+  <td>3.0.0</td>
 </tr>
 </table>
 

