This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git
commit 528a07b1ee4dd1a53d7a8821e8bd41e328ee302c
Author: HyukjinKwon <[email protected]>
AuthorDate: Sun Apr 5 13:27:21 2020 +0900

    Revert "[SPARK-30889][SPARK-30913][CORE][DOC] Add version information to the configuration of Tests.scala and Worker"
    
    This reverts commit 2b93493c202388d010bdd19aef0d9319be56e40e.
---
 .../org/apache/spark/internal/config/Tests.scala  |  8 --------
 .../org/apache/spark/internal/config/Worker.scala | 22 ----------------------
 docs/configuration.md                             |  4 +---
 docs/spark-standalone.md                          | 10 ++--------
 4 files changed, 3 insertions(+), 41 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
index 232264d6..21660ab 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
@@ -22,43 +22,35 @@ private[spark] object Tests {
   val TEST_USE_COMPRESSED_OOPS_KEY = "spark.test.useCompressedOops"
 
   val TEST_MEMORY = ConfigBuilder("spark.testing.memory")
-    .version("1.6.0")
     .longConf
     .createWithDefault(Runtime.getRuntime.maxMemory)
 
   val TEST_SCHEDULE_INTERVAL =
     ConfigBuilder("spark.testing.dynamicAllocation.scheduleInterval")
-      .version("2.3.0")
       .longConf
       .createWithDefault(100)
 
   val IS_TESTING = ConfigBuilder("spark.testing")
-    .version("1.0.1")
     .booleanConf
     .createOptional
 
   val TEST_NO_STAGE_RETRY = ConfigBuilder("spark.test.noStageRetry")
-    .version("1.2.0")
     .booleanConf
     .createWithDefault(false)
 
   val TEST_RESERVED_MEMORY = ConfigBuilder("spark.testing.reservedMemory")
-    .version("1.6.0")
     .longConf
     .createOptional
 
   val TEST_N_HOSTS = ConfigBuilder("spark.testing.nHosts")
-    .version("3.0.0")
     .intConf
     .createWithDefault(5)
 
   val TEST_N_EXECUTORS_HOST = ConfigBuilder("spark.testing.nExecutorsPerHost")
-    .version("3.0.0")
     .intConf
     .createWithDefault(4)
 
   val TEST_N_CORES_EXECUTOR = ConfigBuilder("spark.testing.nCoresPerExecutor")
-    .version("3.0.0")
     .intConf
     .createWithDefault(2)
 }
diff --git a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
index 619b636..f1eaae2 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
@@ -28,69 +28,47 @@ private[spark] object Worker {
     .doc("Path to a file containing the resources allocated to the worker. " +
       "The file should be formatted as a JSON array of ResourceAllocation objects. " +
" + "Only used internally in standalone mode.") - .version("3.0.0") .stringConf .createOptional val WORKER_TIMEOUT = ConfigBuilder("spark.worker.timeout") - .version("0.6.2") .longConf .createWithDefault(60) val WORKER_DRIVER_TERMINATE_TIMEOUT = ConfigBuilder("spark.worker.driverTerminateTimeout") - .version("2.1.2") .timeConf(TimeUnit.MILLISECONDS) .createWithDefaultString("10s") val WORKER_CLEANUP_ENABLED = ConfigBuilder("spark.worker.cleanup.enabled") - .version("1.0.0") .booleanConf .createWithDefault(false) val WORKER_CLEANUP_INTERVAL = ConfigBuilder("spark.worker.cleanup.interval") - .version("1.0.0") .longConf .createWithDefault(60 * 30) val APP_DATA_RETENTION = ConfigBuilder("spark.worker.cleanup.appDataTtl") - .version("1.0.0") .longConf .createWithDefault(7 * 24 * 3600) val PREFER_CONFIGURED_MASTER_ADDRESS = ConfigBuilder("spark.worker.preferConfiguredMasterAddress") - .version("2.2.1") .booleanConf .createWithDefault(false) val WORKER_UI_PORT = ConfigBuilder("spark.worker.ui.port") - .version("1.1.0") .intConf .createOptional val WORKER_UI_RETAINED_EXECUTORS = ConfigBuilder("spark.worker.ui.retainedExecutors") - .version("1.5.0") .intConf .createWithDefault(1000) val WORKER_UI_RETAINED_DRIVERS = ConfigBuilder("spark.worker.ui.retainedDrivers") - .version("1.5.0") .intConf .createWithDefault(1000) val UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF = ConfigBuilder("spark.worker.ui.compressedLogFileLengthCacheSize") -<<<<<<< HEAD .intConf .createWithDefault(100) -======= - .version("2.0.2") - .intConf - .createWithDefault(100) - - private[spark] val WORKER_DECOMMISSION_ENABLED = - ConfigBuilder("spark.worker.decommission.enabled") - .version("3.1.0") - .booleanConf - .createWithDefault(false) ->>>>>>> ebcff675e0c... [SPARK-30889][SPARK-30913][CORE][DOC] Add version information to the configuration of Tests.scala and Worker } diff --git a/docs/configuration.md b/docs/configuration.md index b336289..9cbe341 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -929,7 +929,7 @@ Apart from these, the following properties are also available, and may be useful ### Spark UI <table class="table"> -<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr> +<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr> <tr> <td><code>spark.eventLog.logBlockUpdates.enabled</code></td> <td>false</td> @@ -1153,7 +1153,6 @@ Apart from these, the following properties are also available, and may be useful <td> How many finished executors the Spark UI and status APIs remember before garbage collecting. </td> - <td>1.5.0</td> </tr> <tr> <td><code>spark.worker.ui.retainedDrivers</code></td> @@ -1161,7 +1160,6 @@ Apart from these, the following properties are also available, and may be useful <td> How many finished drivers the Spark UI and status APIs remember before garbage collecting. 
   </td>
-  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.sql.ui.retainedExecutions</code></td>
diff --git a/docs/spark-standalone.md b/docs/spark-standalone.md
index 4d4b85e..17b6772 100644
--- a/docs/spark-standalone.md
+++ b/docs/spark-standalone.md
@@ -185,7 +185,7 @@ You can optionally configure the cluster further by setting environment variable
 SPARK_MASTER_OPTS supports the following system properties:
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
 <tr>
   <td><code>spark.deploy.retainedApplications</code></td>
   <td>200</td>
@@ -242,7 +242,6 @@ SPARK_MASTER_OPTS supports the following system properties:
     Number of seconds after which the standalone deploy master considers a worker lost if it
     receives no heartbeats.
   </td>
-  <td>0.6.2</td>
 </tr>
 <tr>
   <td><code>spark.worker.resource.{resourceName}.amount</code></td>
@@ -270,14 +269,13 @@ SPARK_MASTER_OPTS supports the following system properties:
     find that resource. If the discovery script also does not find the resources, the worker will fail
     to start up.
   </td>
-  <td>3.0.0</td>
 </tr>
 </table>
 
 SPARK_WORKER_OPTS supports the following system properties:
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
 <tr>
   <td><code>spark.worker.cleanup.enabled</code></td>
   <td>false</td>
@@ -286,7 +284,6 @@ SPARK_WORKER_OPTS supports the following system properties:
     mode, as YARN works differently. Only the directories of stopped applications are cleaned up.
     This should be enabled if spark.shuffle.service.db.enabled is "true"
   </td>
-  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.worker.cleanup.interval</code></td>
@@ -295,7 +292,6 @@ SPARK_WORKER_OPTS supports the following system properties:
     Controls the interval, in seconds, at which the worker cleans up old application work dirs
     on the local machine.
   </td>
-  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.worker.cleanup.appDataTtl</code></td>
@@ -306,7 +302,6 @@ SPARK_WORKER_OPTS supports the following system properties:
     downloaded to each application work dir. Over time, the work dirs can quickly fill up disk space,
     especially if you run jobs very frequently.
   </td>
-  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.shuffle.service.db.enabled</code></td>
@@ -338,7 +333,6 @@ SPARK_WORKER_OPTS supports the following system properties:
     Spark caches the uncompressed file size of compressed log files. This property controls the cache
     size.
   </td>
-  <td>2.0.2</td>
 </tr>
 </table>

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
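For context when reading the diff above: the reverted change tagged each entry in Spark's internal ConfigBuilder DSL with a .version() call recording the release that introduced the setting, and the docs rendered that metadata as the "Since Version" column removed here. The Worker.scala hunk also strips unresolved merge-conflict markers (<<<<<<< HEAD ... >>>>>>>) that the reverted commit had left in the file on this branch. The sketch below is illustrative only and not part of this commit; the enclosing object name is hypothetical, while the config keys, builder calls, and version strings are taken from the diff:

    package org.apache.spark.internal.config

    import java.util.concurrent.TimeUnit

    // Hypothetical container object, for illustration only; not in the Spark source tree.
    private[spark] object WorkerConfigSketch {
      // With the reverted change applied, an entry carried the release that introduced it:
      val WORKER_TIMEOUT = ConfigBuilder("spark.worker.timeout")
        .version("0.6.2") // release in which spark.worker.timeout first appeared
        .longConf
        .createWithDefault(60)

      // After this revert, branch-3.0 defines the same kind of entry without the metadata:
      val WORKER_DRIVER_TERMINATE_TIMEOUT = ConfigBuilder("spark.worker.driverTerminateTimeout")
        .timeConf(TimeUnit.MILLISECONDS)
        .createWithDefaultString("10s")
    }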
