This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new a0cf972  [SPARK-31141][DSTREAMS][DOC] Add version information to the configuration of Dstreams
a0cf972 is described below

commit a0cf97298523aa3d22d08aa21dba3b501d585248
Author: beliefer <[email protected]>
AuthorDate: Mon Mar 23 13:01:44 2020 +0900

    [SPARK-31141][DSTREAMS][DOC] Add version information to the configuration of Dstreams
    
    ### What changes were proposed in this pull request?
    Add version information to the configuration of `Dstreams`.
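    
    Each `ConfigEntry` in `StreamingConf` now carries the release that introduced it, via `ConfigBuilder.version(...)`. A minimal sketch of the pattern applied throughout this patch (the config key and version value below are hypothetical, for illustration only):
    
    ```scala
    package org.apache.spark.streaming
    
    import java.util.concurrent.TimeUnit
    
    import org.apache.spark.internal.config.ConfigBuilder
    
    object ExampleConf {
      // Hypothetical entry (key and version are made up); the real entries
      // in StreamingConf follow exactly this shape.
      private[streaming] val EXAMPLE_INTERVAL =
        ConfigBuilder("spark.streaming.example.interval")
          .version("1.5.0")  // release that first shipped the config
          .timeConf(TimeUnit.MILLISECONDS)
          .createWithDefaultString("200ms")
    }
    ```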
    
    I sorted out the version information for each item, shown in the table below; a short usage sketch follows the table.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.streaming.backpressure.enabled | 1.5.0 | SPARK-9967 and SPARK-10099 | 392bd19d678567751cd3844d9d166a7491c5887e#diff-1b584c4ed88a9022abb11d594f760997 |
    spark.streaming.backpressure.initialRate | 2.0.0 | SPARK-11627 | 7218c0eba957e0a079a407b79c3a050cce9647b2#diff-c64d571ef32d2dbf76e965ecd04a9f52 |
    spark.streaming.blockInterval | 0.8.0 | None | 46eecd110a4017ea0c86cbb1010d0ccd6a5eb2ef#diff-54d85b29e4349628a0de525c119399b5 |
    spark.streaming.receiver.maxRate | 1.0.2 | SPARK-1341 | ca19cfbcd5cfac9ad731350dfeea14355aec87d6#diff-c64d571ef32d2dbf76e965ecd04a9f52 |
    spark.streaming.receiver.writeAheadLog.enable | 1.2.1 | SPARK-4482 | ce5ea0fd611ce560f6e1fac83562469bdb97091e#diff-0607b70e4e79cbbc1a128c45784cb813 |
    spark.streaming.unpersist | 0.9.0 | None | 08b9fec93d00ff0ebb49af4d9ac72d2806eded02#diff-bcf5f84f78d23ebde7d532bea756bc57 |
    spark.streaming.stopGracefullyOnShutdown | 1.4.0 | SPARK-7776 | a17a5cb302c5fa6a4d3e9e3e0fa2100c0b5436d6#diff-8a7f0e3f26c15ba484e6312c3caf033d |
    spark.streaming.kafka.maxRetries | 1.3.0 | SPARK-4964 | a119cae48030520da9f26ee9a1270bed7f33031e#diff-26cb4369f86050dc2e75cd16291b2844 |
    spark.streaming.ui.retainedBatches | 1.0.0 | SPARK-1386 | f36dc3fed0a0671b0712d664db859da28c0a98e2#diff-56b8d67d07284cfab165d5363bd3500e |
    spark.streaming.driver.writeAheadLog.closeFileAfterWrite | 1.6.0 | SPARK-11324 | 4f030b9e82172659d250281782ac573cbd1438fc#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.receiver.writeAheadLog.closeFileAfterWrite | 1.6.0 | SPARK-11324 | 4f030b9e82172659d250281782ac573cbd1438fc#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.receiver.writeAheadLog.class | 1.4.0 | SPARK-7056 | 1868bd40dcce23990b98748b0239bd00452b1ca5#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.receiver.writeAheadLog.rollingIntervalSecs | 1.4.0 | SPARK-7056 | 1868bd40dcce23990b98748b0239bd00452b1ca5#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.receiver.writeAheadLog.maxFailures | 1.2.0 | SPARK-4028 | 234de9232bcfa212317a8073c4a82c3863b36b14#diff-8cec1a581eebcad673dc8930b1a2801c |
    spark.streaming.driver.writeAheadLog.class | 1.4.0 | SPARK-7056 | 1868bd40dcce23990b98748b0239bd00452b1ca5#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.driver.writeAheadLog.rollingIntervalSecs | 1.4.0 | SPARK-7056 | 1868bd40dcce23990b98748b0239bd00452b1ca5#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.driver.writeAheadLog.maxFailures | 1.4.0 | SPARK-7056 | 1868bd40dcce23990b98748b0239bd00452b1ca5#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.driver.writeAheadLog.allowBatching | 1.6.0 | SPARK-11141 | dccc4645df629f35c4788d50b2c0a6ab381db4b7#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.driver.writeAheadLog.batchingTimeout | 1.6.0 | SPARK-11141 | dccc4645df629f35c4788d50b2c0a6ab381db4b7#diff-a1b3ec72e8d7cc91433a1cc64fe6e91d |
    spark.streaming.sessionByKey.deltaChainThreshold | 1.6.0 | SPARK-11290 | daa74be6f863061221bb0c2f94e70672e6fcbeaa#diff-e0a40541298f885606a2361ff9c5af6c |
    spark.streaming.backpressure.rateEstimator | 1.5.0 | SPARK-8977 | 819be46e5a73f2d19230354ebba30c58538590f5#diff-5dcaea3a4eca07f898fa88fe6d69e5c3 |
    spark.streaming.backpressure.pid.proportional | 1.5.0 | SPARK-8979 | 0a1d2ca42c8b31d6b0e70163795f0185d4622f87#diff-5dcaea3a4eca07f898fa88fe6d69e5c3 |
    spark.streaming.backpressure.pid.integral | 1.5.0 | SPARK-8979 | 0a1d2ca42c8b31d6b0e70163795f0185d4622f87#diff-5dcaea3a4eca07f898fa88fe6d69e5c3 |
    spark.streaming.backpressure.pid.derived | 1.5.0 | SPARK-8979 | 0a1d2ca42c8b31d6b0e70163795f0185d4622f87#diff-5dcaea3a4eca07f898fa88fe6d69e5c3 |
    spark.streaming.backpressure.pid.minRate | 1.5.0 | SPARK-9966 | 612b4609bdd38763725ae07d77c2176aa6756e64#diff-5dcaea3a4eca07f898fa88fe6d69e5c3 |
    spark.streaming.concurrentJobs | 0.7.0 | None | c97ebf64377e853ab7c616a103869a4417f25954#diff-839f06302b2d648a85436486fc13c85d |
    spark.streaming.internal.batchTime | 1.4.0 | SPARK-6862 | 1b7106b867bc0aa4d64b669d79b646f862acaf47#diff-25124e4f06a1da237bf486eceb1f7967 | It's not a configuration, it's a property
    spark.streaming.internal.outputOpId | 1.4.0 | SPARK-6862 | 1b7106b867bc0aa4d64b669d79b646f862acaf47#diff-25124e4f06a1da237bf486eceb1f7967 | It's not a configuration, it's a property
    spark.streaming.clock | 0.7.0 | None | cae894ee7aefa4cf9b1952038a48be81e1d2a856#diff-839f06302b2d648a85436486fc13c85d |
    spark.streaming.gracefulStopTimeout | 1.0.0 | SPARK-1332 | 94cbe2329021296b660d88f3e8ef3734374020d2#diff-2f8c5c038fda47b9875e10785fdd2498 |
    spark.streaming.manualClock.jump | 0.7.0 | None | fc3d0b602a08fdd182c2138506d1cd9952631f95#diff-839f06302b2d648a85436486fc13c85d |
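    
    These are ordinary Spark properties, set on `SparkConf` before the `StreamingContext` is created. A minimal usage sketch (the app name and master are placeholders; any property from the table is set the same way):
    
    ```scala
    import org.apache.spark.SparkConf
    import org.apache.spark.streaming.{Seconds, StreamingContext}
    
    val conf = new SparkConf()
      .setAppName("backpressure-example")                     // placeholder app name
      .setMaster("local[2]")                                  // placeholder master
      .set("spark.streaming.backpressure.enabled", "true")    // since 1.5.0
      .set("spark.streaming.backpressure.pid.minRate", "50")  // since 1.5.0
    
    // 1-second batch interval; streams registered on ssc pick up the settings above.
    val ssc = new StreamingContext(conf, Seconds(1))
    ```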
    
    ### Why are the changes needed?
    Supplements the Dstreams configuration with version information.
    
    ### Does this PR introduce any user-facing change?
    'No'
    
    ### How was this patch tested?
    Existing unit tests.
    
    Closes #27898 from beliefer/add-version-to-dstream-config.
    
    Authored-by: beliefer <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
---
 docs/configuration.md                              | 24 +++++++++++++------
 .../org/apache/spark/streaming/StreamingConf.scala | 27 ++++++++++++++++++++++
 2 files changed, 44 insertions(+), 7 deletions(-)

diff --git a/docs/configuration.md b/docs/configuration.md
index f9c8a360..9e5f5b6 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -2573,7 +2573,7 @@ Spark subsystems.
 ### Spark Streaming
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.streaming.backpressure.enabled</code></td>
   <td>false</td>
@@ -2586,6 +2586,7 @@ Spark subsystems.
     <code>spark.streaming.receiver.maxRate</code> and <code>spark.streaming.kafka.maxRatePerPartition</code>
     if they are set (see below).
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.backpressure.initialRate</code></td>
@@ -2594,6 +2595,7 @@ Spark subsystems.
     This is the initial maximum receiving rate at which each receiver will receive data for the
     first batch when the backpressure mechanism is enabled.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.blockInterval</code></td>
@@ -2604,6 +2606,7 @@ Spark subsystems.
     <a href="streaming-programming-guide.html#level-of-parallelism-in-data-receiving">performance
      tuning</a> section in the Spark Streaming programming guide for more details.
   </td>
+  <td>0.8.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.receiver.maxRate</code></td>
@@ -2615,6 +2618,7 @@ Spark subsystems.
     See the <a href="streaming-programming-guide.html#deploying-applications">deployment guide</a>
     in the Spark Streaming programming guide for more details.
   </td>
+  <td>1.0.2</td>
 </tr>
 <tr>
   <td><code>spark.streaming.receiver.writeAheadLog.enable</code></td>
@@ -2625,6 +2629,7 @@ Spark subsystems.
     See the <a href="streaming-programming-guide.html#deploying-applications">deployment guide</a>
     in the Spark Streaming programming guide for more details.
   </td>
+  <td>1.2.1</td>
 </tr>
 <tr>
   <td><code>spark.streaming.unpersist</code></td>
@@ -2636,6 +2641,7 @@ Spark subsystems.
     streaming application as they will not be cleared automatically. But it comes at the cost of
     higher memory usage in Spark.
   </td>
+  <td>0.9.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.stopGracefullyOnShutdown</code></td>
@@ -2644,6 +2650,7 @@ Spark subsystems.
     If <code>true</code>, Spark shuts down the <code>StreamingContext</code> gracefully on JVM
     shutdown rather than immediately.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.kafka.maxRatePerPartition</code></td>
@@ -2656,12 +2663,12 @@ Spark subsystems.
   </td>
 </tr>
 <tr>
-    <td><code>spark.streaming.kafka.minRatePerPartition</code></td>
-    <td>1</td>
-    <td>
-      Minimum rate (number of records per second) at which data will be read from each Kafka
-      partition when using the new Kafka direct stream API.
-    </td>
+  <td><code>spark.streaming.kafka.minRatePerPartition</code></td>
+  <td>1</td>
+  <td>
+    Minimum rate (number of records per second) at which data will be read from each Kafka
+    partition when using the new Kafka direct stream API.
+  </td>
 </tr>
 <tr>
   <td><code>spark.streaming.ui.retainedBatches</code></td>
@@ -2669,6 +2676,7 @@ Spark subsystems.
   <td>
     How many batches the Spark Streaming UI and status APIs remember before garbage collecting.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.driver.writeAheadLog.closeFileAfterWrite</code></td>
@@ -2678,6 +2686,7 @@ Spark subsystems.
     when you want to use S3 (or any file system that does not support flushing) for the metadata WAL
     on the driver.
   </td>
+  <td>1.6.0</td>
 </tr>
 <tr>
   <td><code>spark.streaming.receiver.writeAheadLog.closeFileAfterWrite</code></td>
@@ -2687,6 +2696,7 @@ Spark subsystems.
     when you want to use S3 (or any file system that does not support flushing) for the data WAL
     on the receivers.
   </td>
+  <td>1.6.0</td>
 </tr>
 </table>
 
diff --git a/streaming/src/main/scala/org/apache/spark/streaming/StreamingConf.scala b/streaming/src/main/scala/org/apache/spark/streaming/StreamingConf.scala
index 71aefd6..bb80bd7 100644
--- a/streaming/src/main/scala/org/apache/spark/streaming/StreamingConf.scala
+++ b/streaming/src/main/scala/org/apache/spark/streaming/StreamingConf.scala
@@ -26,135 +26,162 @@ object StreamingConf {
 
   private[streaming] val BACKPRESSURE_ENABLED =
     ConfigBuilder("spark.streaming.backpressure.enabled")
+      .version("1.5.0")
       .booleanConf
       .createWithDefault(false)
 
   private[streaming] val RECEIVER_MAX_RATE =
     ConfigBuilder("spark.streaming.receiver.maxRate")
+      .version("1.0.2")
       .longConf
       .createWithDefault(Long.MaxValue)
 
   private[streaming] val BACKPRESSURE_INITIAL_RATE =
     ConfigBuilder("spark.streaming.backpressure.initialRate")
+      .version("2.0.0")
       .fallbackConf(RECEIVER_MAX_RATE)
 
   private[streaming] val BLOCK_INTERVAL =
     ConfigBuilder("spark.streaming.blockInterval")
+      .version("0.8.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("200ms")
 
   private[streaming] val RECEIVER_WAL_ENABLE_CONF_KEY =
     ConfigBuilder("spark.streaming.receiver.writeAheadLog.enable")
+      .version("1.2.1")
       .booleanConf
       .createWithDefault(false)
 
   private[streaming] val RECEIVER_WAL_CLASS_CONF_KEY =
     ConfigBuilder("spark.streaming.receiver.writeAheadLog.class")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
   private[streaming] val RECEIVER_WAL_ROLLING_INTERVAL_CONF_KEY =
     ConfigBuilder("spark.streaming.receiver.writeAheadLog.rollingIntervalSecs")
+      .version("1.4.0")
       .intConf
       .createWithDefault(60)
 
   private[streaming] val RECEIVER_WAL_MAX_FAILURES_CONF_KEY =
     ConfigBuilder("spark.streaming.receiver.writeAheadLog.maxFailures")
+      .version("1.2.0")
       .intConf
       .createWithDefault(3)
 
   private[streaming] val RECEIVER_WAL_CLOSE_AFTER_WRITE_CONF_KEY =
     ConfigBuilder("spark.streaming.receiver.writeAheadLog.closeFileAfterWrite")
+      .version("1.6.0")
       .booleanConf
       .createWithDefault(false)
 
   private[streaming] val DRIVER_WAL_CLASS_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.class")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
   private[streaming] val DRIVER_WAL_ROLLING_INTERVAL_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.rollingIntervalSecs")
+      .version("1.4.0")
       .intConf
       .createWithDefault(60)
 
   private[streaming] val DRIVER_WAL_MAX_FAILURES_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.maxFailures")
+      .version("1.4.0")
       .intConf
       .createWithDefault(3)
 
   private[streaming] val DRIVER_WAL_CLOSE_AFTER_WRITE_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.closeFileAfterWrite")
+      .version("1.6.0")
       .booleanConf
       .createWithDefault(false)
 
   private[streaming] val DRIVER_WAL_BATCHING_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.allowBatching")
+      .version("1.6.0")
       .booleanConf
       .createWithDefault(true)
 
   private[streaming] val DRIVER_WAL_BATCHING_TIMEOUT_CONF_KEY =
     ConfigBuilder("spark.streaming.driver.writeAheadLog.batchingTimeout")
+      .version("1.6.0")
       .longConf
       .createWithDefault(5000)
 
   private[streaming] val STREAMING_UNPERSIST =
     ConfigBuilder("spark.streaming.unpersist")
+      .version("0.9.0")
       .booleanConf
       .createWithDefault(true)
 
   private[streaming] val STOP_GRACEFULLY_ON_SHUTDOWN =
     ConfigBuilder("spark.streaming.stopGracefullyOnShutdown")
+      .version("1.4.0")
       .booleanConf
       .createWithDefault(false)
 
   private[streaming] val UI_RETAINED_BATCHES =
     ConfigBuilder("spark.streaming.ui.retainedBatches")
+      .version("1.0.0")
       .intConf
       .createWithDefault(1000)
 
   private[streaming] val SESSION_BY_KEY_DELTA_CHAIN_THRESHOLD =
     ConfigBuilder("spark.streaming.sessionByKey.deltaChainThreshold")
+      .version("1.6.0")
       .intConf
       .createWithDefault(DELTA_CHAIN_LENGTH_THRESHOLD)
 
   private[streaming] val BACKPRESSURE_RATE_ESTIMATOR =
     ConfigBuilder("spark.streaming.backpressure.rateEstimator")
+      .version("1.5.0")
       .stringConf
       .createWithDefault("pid")
 
   private[streaming] val BACKPRESSURE_PID_PROPORTIONAL =
     ConfigBuilder("spark.streaming.backpressure.pid.proportional")
+      .version("1.5.0")
       .doubleConf
       .createWithDefault(1.0)
 
   private[streaming] val BACKPRESSURE_PID_INTEGRAL =
     ConfigBuilder("spark.streaming.backpressure.pid.integral")
+      .version("1.5.0")
       .doubleConf
       .createWithDefault(0.2)
 
   private[streaming] val BACKPRESSURE_PID_DERIVED =
     ConfigBuilder("spark.streaming.backpressure.pid.derived")
+      .version("1.5.0")
       .doubleConf
       .createWithDefault(0.0)
 
   private[streaming] val BACKPRESSURE_PID_MIN_RATE =
     ConfigBuilder("spark.streaming.backpressure.pid.minRate")
+      .version("1.5.0")
       .doubleConf
       .createWithDefault(100)
 
   private[streaming] val CONCURRENT_JOBS =
     ConfigBuilder("spark.streaming.concurrentJobs")
+      .version("0.7.0")
       .intConf
       .createWithDefault(1)
 
   private[streaming] val GRACEFUL_STOP_TIMEOUT =
     ConfigBuilder("spark.streaming.gracefulStopTimeout")
+      .version("1.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[streaming] val MANUAL_CLOCK_JUMP =
     ConfigBuilder("spark.streaming.manualClock.jump")
+      .version("0.7.0")
       .longConf
       .createWithDefault(0)
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
