This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit 2c5b13f33add9943248c66873ada2c2e1e5d703a
Author: beliefer <[email protected]>
AuthorDate: Thu Feb 27 11:05:11 2020 +0900

    [SPARK-30888][CORE][DOC] Add version information to the configuration of Network
    
    ### What changes were proposed in this pull request?
    1. Add version information to the configuration of `Network`.
    2. Update the docs of `Network`.
    
    I sorted out the information shown below.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.network.crypto.saslFallback | 2.2.0 | SPARK-19139 | 8f3f73abc1fe62496722476460c174af0250e3fe#diff-0ac65da2bc6b083fb861fe410c7688c2 |
    spark.network.crypto.enabled | 2.2.0 | SPARK-19139 | 8f3f73abc1fe62496722476460c174af0250e3fe#diff-6bdad48cfc34314e89599655442ff210 |
    spark.network.remoteReadNioBufferConversion | 2.4.0 | SPARK-24307 | 2c82745686f4456c4d5c84040a431dcb5b6cb60b#diff-2b643ea78c1add0381754b1f47eec132 |
    spark.network.timeout | 1.3.0 | SPARK-4688 | d3f07fd23cc26a70f44c52e24445974d4885d58a#diff-1df6b5af3d8f9f16255ff8c7a06f402f |
    spark.network.timeoutInterval | 1.3.2 | SPARK-5529 | ec196ab1c7569d7ab0a50c9d7338c2835f2c84d5#diff-47779b72f095f7e7f926898fa1a425ee |
    spark.rpc.askTimeout | 1.4.0 | SPARK-6490 | 8136810dfad12008ac300116df7bc8448740f1ae#diff-529fc5c06b9731c1fbda6f3db60b16aa |
    spark.rpc.connect.threads | 1.6.0 | SPARK-6028 | 084e4e126211d74a79e8dbd2d0e604dd3c650822#diff-0c89b4a60c30a7cd2224bb64d93da942 |
    spark.rpc.io.numConnectionsPerPeer | 1.6.0 | SPARK-10745 | 34a77679877bc40b58a10ec539a8da00fed7db39#diff-0c89b4a60c30a7cd2224bb64d93da942 |
    spark.rpc.io.threads | 1.6.0 | SPARK-6028 | 084e4e126211d74a79e8dbd2d0e604dd3c650822#diff-0c89b4a60c30a7cd2224bb64d93da942 |
    spark.rpc.lookupTimeout | 1.4.0 | SPARK-6490 | 8136810dfad12008ac300116df7bc8448740f1ae#diff-529fc5c06b9731c1fbda6f3db60b16aa |
    spark.rpc.message.maxSize | 2.0.0 | SPARK-7997 | bc1babd63da4ee56e6d371eb24805a5d714e8295#diff-529fc5c06b9731c1fbda6f3db60b16aa |
    spark.rpc.netty.dispatcher.numThreads | 1.6.0 | SPARK-11079 | 1797055dbf1d2fd7714d7c65c8d2efde2f15efc1#diff-05133dfc4bfdb6a27aa092d86ce24866 |
    spark.rpc.numRetries | 1.4.0 | SPARK-6490 | 8136810dfad12008ac300116df7bc8448740f1ae#diff-529fc5c06b9731c1fbda6f3db60b16aa |
    spark.rpc.retry.wait | 1.4.0 | SPARK-6490 | 8136810dfad12008ac300116df7bc8448740f1ae#diff-529fc5c06b9731c1fbda6f3db60b16aa |
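
    For reference, the change applies the same pattern to every entry above. A minimal sketch, mirroring the diff below for `spark.rpc.retry.wait` (no new API beyond the existing `ConfigBuilder.version` setter; the import lines are added here only to make the snippet self-contained):

    ```scala
    import java.util.concurrent.TimeUnit

    import org.apache.spark.internal.config.ConfigBuilder

    // Each entry now records the release that first shipped it via .version(...).
    private[spark] val RPC_RETRY_WAIT =
      ConfigBuilder("spark.rpc.retry.wait")
        .version("1.4.0")                  // Since version, per the table above
        .timeConf(TimeUnit.MILLISECONDS)
        .createWithDefaultString("3s")
    ```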
    
    ### Why are the changes needed?
    This supplements the `Network` configuration entries with version information.
    
    ### Does this PR introduce any user-facing change?
    No
    
    ### How was this patch tested?
    Existing unit tests.
    
    Closes #27674 from beliefer/add-version-to-network-config.
    
    Authored-by: beliefer <[email protected]>
    Signed-off-by: HyukjinKwon <[email protected]>
---
 .../org/apache/spark/internal/config/Network.scala   | 14 ++++++++++++++
 docs/configuration.md                                | 20 ++++++++++++++++++--
 2 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/Network.scala b/core/src/main/scala/org/apache/spark/internal/config/Network.scala
index 129e31a..0961d06 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Network.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Network.scala
@@ -23,71 +23,85 @@ private[spark] object Network {
 
   private[spark] val NETWORK_CRYPTO_SASL_FALLBACK =
     ConfigBuilder("spark.network.crypto.saslFallback")
+      .version("2.2.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val NETWORK_CRYPTO_ENABLED =
     ConfigBuilder("spark.network.crypto.enabled")
+      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val NETWORK_REMOTE_READ_NIO_BUFFER_CONVERSION =
     ConfigBuilder("spark.network.remoteReadNioBufferConversion")
+      .version("2.4.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val NETWORK_TIMEOUT =
     ConfigBuilder("spark.network.timeout")
+      .version("1.3.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("120s")
 
   private[spark] val NETWORK_TIMEOUT_INTERVAL =
     ConfigBuilder("spark.network.timeoutInterval")
+      .version("1.3.2")
      .timeConf(TimeUnit.MILLISECONDS)
      .createWithDefaultString(STORAGE_BLOCKMANAGER_TIMEOUTINTERVAL.defaultValueString)
 
   private[spark] val RPC_ASK_TIMEOUT =
     ConfigBuilder("spark.rpc.askTimeout")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
   private[spark] val RPC_CONNECT_THREADS =
     ConfigBuilder("spark.rpc.connect.threads")
+      .version("1.6.0")
       .intConf
       .createWithDefault(64)
 
   private[spark] val RPC_IO_NUM_CONNECTIONS_PER_PEER =
     ConfigBuilder("spark.rpc.io.numConnectionsPerPeer")
+      .version("1.6.0")
       .intConf
       .createWithDefault(1)
 
   private[spark] val RPC_IO_THREADS =
     ConfigBuilder("spark.rpc.io.threads")
+      .version("1.6.0")
       .intConf
       .createOptional
 
   private[spark] val RPC_LOOKUP_TIMEOUT =
     ConfigBuilder("spark.rpc.lookupTimeout")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
   private[spark] val RPC_MESSAGE_MAX_SIZE =
     ConfigBuilder("spark.rpc.message.maxSize")
+      .version("2.0.0")
       .intConf
       .createWithDefault(128)
 
   private[spark] val RPC_NETTY_DISPATCHER_NUM_THREADS =
     ConfigBuilder("spark.rpc.netty.dispatcher.numThreads")
+      .version("1.6.0")
       .intConf
       .createOptional
 
   private[spark] val RPC_NUM_RETRIES =
     ConfigBuilder("spark.rpc.numRetries")
+      .version("1.4.0")
       .intConf
       .createWithDefault(3)
 
   private[spark] val RPC_RETRY_WAIT =
     ConfigBuilder("spark.rpc.retry.wait")
+      .version("1.4.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("3s")
 }
diff --git a/docs/configuration.md b/docs/configuration.md
index 88edaf6..295cb3d 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -1658,7 +1658,7 @@ Apart from these, the following properties are also available, and may be useful
 ### Networking
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.rpc.message.maxSize</code></td>
   <td>128</td>
@@ -1667,6 +1667,7 @@ Apart from these, the following properties are also available, and may be useful
    output size information sent between executors and the driver. Increase this if you are running
    jobs with many thousands of map and reduce tasks and see messages about the RPC message size.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.blockManager.port</code></td>
@@ -1674,6 +1675,7 @@ Apart from these, the following properties are also available, and may be useful
  <td>
    Port for all block managers to listen on. These exist on both the driver and the executors.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.driver.blockManager.port</code></td>
@@ -1682,6 +1684,7 @@ Apart from these, the following properties are also available, and may be useful
    Driver-specific port for the block manager to listen on, for cases where it cannot use the same
     configuration as executors.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.driver.bindAddress</code></td>
@@ -1695,6 +1698,7 @@ Apart from these, the following properties are also available, and may be useful
    the different ports used by the driver (RPC, block manager and UI) need to be forwarded from the
     container's host.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.driver.host</code></td>
@@ -1703,6 +1707,7 @@ Apart from these, the following properties are also available, and may be useful
    Hostname or IP address for the driver.
    This is used for communicating with the executors and the standalone Master.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.driver.port</code></td>
@@ -1711,6 +1716,7 @@ Apart from these, the following properties are also available, and may be useful
    Port for the driver to listen on.
    This is used for communicating with the executors and the standalone Master.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.rpc.io.backLog</code></td>
@@ -1720,6 +1726,7 @@ Apart from these, the following properties are also available, and may be useful
    need to be increased, so that incoming connections are not dropped when a large number of
     connections arrives in a short period of time.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.network.timeout</code></td>
@@ -1731,6 +1738,7 @@ Apart from these, the following properties are also available, and may be useful
    <code>spark.shuffle.io.connectionTimeout</code>, <code>spark.rpc.askTimeout</code> or
     <code>spark.rpc.lookupTimeout</code> if they are not configured.
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.network.io.preferDirectBufs</code></td>
@@ -1740,7 +1748,8 @@ Apart from these, the following properties are also available, and may be useful
    Off-heap buffers are used to reduce garbage collection during shuffle and cache
    block transfer. For environments where off-heap memory is tightly limited, users may wish to
     turn this off to force all allocations to be on-heap.
-    </td>
+  </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.port.maxRetries</code></td>
@@ -1752,6 +1761,7 @@ Apart from these, the following properties are also available, and may be useful
     essentially allows it to try a range of ports from the start port specified
     to port + maxRetries.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.rpc.numRetries</code></td>
@@ -1760,6 +1770,7 @@ Apart from these, the following properties are also available, and may be useful
     Number of times to retry before an RPC task gives up.
     An RPC task will run at most times of this number.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.rpc.retry.wait</code></td>
@@ -1767,6 +1778,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Duration for an RPC ask operation to wait before retrying.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.rpc.askTimeout</code></td>
@@ -1774,6 +1786,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     Duration for an RPC ask operation to wait before timing out.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.rpc.lookupTimeout</code></td>
@@ -1781,6 +1794,7 @@ Apart from these, the following properties are also available, and may be useful
  <td>
    Duration for an RPC remote endpoint lookup operation to wait before timing out.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.core.connection.ack.wait.timeout</code></td>
@@ -1790,6 +1804,7 @@ Apart from these, the following properties are also available, and may be useful
     out and giving up. To avoid unwilling timeout caused by long pause like GC,
     you can set larger value.
   </td>
+  <td></td>
 </tr>
 <tr>
   <td><code>spark.network.maxRemoteBlockSizeFetchToMem</code></td>
@@ -1801,6 +1816,7 @@ Apart from these, the following properties are also available, and may be useful
    For users who enabled external shuffle service, this feature can only work when
     external shuffle service is at least 2.3.0.
   </td>
+  <td></td>
 </tr>
 </table>
 

