Posted to commits@spark.apache.org by gu...@apache.org on 2020/04/05 04:28:35 UTC

[spark] 02/02: [SPARK-30889][SPARK-30913][CORE][DOC] Add version information to the configuration of Tests.scala and Worker

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit 5017bd6e42ef8b0d4dcf7eabf4293da1eecef16e
Author: beliefer <be...@163.com>
AuthorDate: Thu Mar 5 11:58:21 2020 +0900

    [SPARK-30889][SPARK-30913][CORE][DOC] Add version information to the configuration of Tests.scala and Worker
    
    1. Add version information to the configuration of `Tests` and `Worker` (a reader-side sketch follows this list).
    2. Update the docs of `Worker`.
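
    As a minimal sketch of the pattern this commit applies (a hypothetical
    standalone example, not part of the patch): each `ConfigBuilder` chain gains
    a `.version(...)` call, and the resulting `ConfigEntry` exposes the recorded
    string. `ConfigBuilder` is `private[spark]`, so the example sits under the
    `org.apache.spark` package, as Spark's own code does.

    ```scala
    package org.apache.spark.example

    import org.apache.spark.internal.config.ConfigBuilder

    object VersionedConfigSketch {
      // Mirrors the pattern added in this commit: version precedes the type.
      val WORKER_TIMEOUT = ConfigBuilder("spark.worker.timeout")
        .version("0.6.2") // release in which the key first appeared
        .longConf
        .createWithDefault(60)

      def main(args: Array[String]): Unit = {
        // The entry carries the version string for use in generated docs.
        println(s"${WORKER_TIMEOUT.key} is available since ${WORKER_TIMEOUT.version}")
      }
    }
    ```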
    
    I sorted out some information on `Tests`, shown below (a usage sketch follows the table).
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.testing.memory | 1.6.0 | SPARK-10983 | b3ffac5178795f2d8e7908b3e77e8e89f50b5f6f#diff-395d07dcd46359cca610ce74357f0bb4 |  
    spark.testing.dynamicAllocation.scheduleInterval | 2.3.0 | SPARK-22864 | 4e9e6aee44bb2ddb41b567d659358b22fd824222#diff-b096353602813e47074ace09a3890d56 |  
    spark.testing | 1.0.1 | SPARK-1606 | ce57624b8232159fe3ec6db228afc622133df591#diff-d239aee594001f8391676e1047a0381e |  
    spark.test.noStageRetry | 1.2.0 | SPARK-3796 | f55218aeb1e9d638df6229b36a59a15ce5363482#diff-6a9ff7fb74fd490a50462d45db2d5e11 |  
    spark.testing.reservedMemory | 1.6.0 | SPARK-12081 | 84c44b500b5c90dffbe1a6b0aa86f01699b09b96#diff-395d07dcd46359cca610ce74357f0bb4 |
    spark.testing.nHosts | 3.0.0 | SPARK-26491 | 1a641525e60039cc6b10816e946cb6f44b3e2696#diff-8b4ea8f3b0cc1e7ce7e943de1abbb165 |  
    spark.testing.nExecutorsPerHost | 3.0.0 | SPARK-26491 | 1a641525e60039cc6b10816e946cb6f44b3e2696#diff-8b4ea8f3b0cc1e7ce7e943de1abbb165 |  
    spark.testing.nCoresPerExecutor | 3.0.0 | SPARK-26491 | 1a641525e60039cc6b10816e946cb6f44b3e2696#diff-8b4ea8f3b0cc1e7ce7e943de1abbb165 |  
    spark.resources.warnings.testing | 3.1.0 | SPARK-29148 | 496f6ac86001d284cbfb7488a63dd3a168919c0f#diff-8b4ea8f3b0cc1e7ce7e943de1abbb165 |  
    spark.testing.resourceProfileManager | 3.1.0 | SPARK-29148 | 496f6ac86001d284cbfb7488a63dd3a168919c0f#diff-8b4ea8f3b0cc1e7ce7e943de1abbb165 |  
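
    A hedged sketch of how one of these testing configs is consulted
    (hypothetical example; `SparkConf.set`/`get` with config entries are
    `private[spark]`, hence the package choice):

    ```scala
    package org.apache.spark.example

    import org.apache.spark.SparkConf
    import org.apache.spark.internal.config.Tests

    object TestingFlagSketch {
      def main(args: Array[String]): Unit = {
        // IS_TESTING is the optional boolean "spark.testing" (since 1.0.1).
        val conf = new SparkConf().set(Tests.IS_TESTING, true)
        val testing = conf.get(Tests.IS_TESTING).getOrElse(false)
        println(s"testing mode: $testing")
      }
    }
    ```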
    
    I sorted out some information on `Worker`, shown below (a test-style sketch follows the table).
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.worker.resourcesFile | 3.0.0 | SPARK-27369 | 7cbe01e8efc3f6cd3a0cac4bcfadea8fcc74a955#diff-b2fc8d6ab7ac5735085e2d6cfacb95da |  
    spark.worker.timeout | 0.6.2 | None | e395aa295aeec6767df798bf1002b1f30983c1cd#diff-776a630ac2b2ec5fe85c07ca20a58fc0 |  
    spark.worker.driverTerminateTimeout | 2.1.2 | SPARK-20843 | ebd72f453aa0b4f68760d28b3e93e6dd33856659#diff-829a8674171f92acd61007bedb1bfa4f |  
    spark.worker.cleanup.enabled | 1.0.0 | SPARK-1154 | 1440154c27ca48b5a75103eccc9057286d3f6ca8#diff-916ca56b663f178f302c265b7ef38499 |  
    spark.worker.cleanup.interval | 1.0.0 | SPARK-1154 | 1440154c27ca48b5a75103eccc9057286d3f6ca8#diff-916ca56b663f178f302c265b7ef38499 |  
    spark.worker.cleanup.appDataTtl | 1.0.0 | SPARK-1154 | 1440154c27ca48b5a75103eccc9057286d3f6ca8#diff-916ca56b663f178f302c265b7ef38499 |  
    spark.worker.preferConfiguredMasterAddress | 2.2.1 | SPARK-20529 | 75e5ea294c15ecfb7366ae15dce196aa92c87ca4#diff-916ca56b663f178f302c265b7ef38499 |  
    spark.worker.ui.port | 1.1.0 | SPARK-2857 | 12f99cf5f88faf94d9dbfe85cb72d0010a3a25ac#diff-48ca297b6536cb92362bec1487581f05 |  
    spark.worker.ui.retainedExecutors | 1.5.0 | SPARK-9202 | c0686668ae6a92b6bb4801a55c3b78aedbee816a#diff-916ca56b663f178f302c265b7ef38499 |
    spark.worker.ui.retainedDrivers | 1.5.0 | SPARK-9202 | c0686668ae6a92b6bb4801a55c3b78aedbee816a#diff-916ca56b663f178f302c265b7ef38499 |
    spark.worker.ui.compressedLogFileLengthCacheSize | 2.0.2 | SPARK-17711 | 26e978a93f029e1a1b5c7524d0b52c8141b70997#diff-d239aee594001f8391676e1047a0381e |  
    spark.worker.decommission.enabled | 3.1.0 | SPARK-20628 | d273a2bb0fac452a97f5670edd69d3e452e3e57e#diff-b2fc8d6ab7ac5735085e2d6cfacb95da |  
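
    A sketch of how these "Since version" values can be pinned in a unit test
    (hypothetical suite, assuming ScalaTest 3.1+; placed under
    `org.apache.spark` because `Worker` is `private[spark]`):

    ```scala
    package org.apache.spark.internal.config

    import org.scalatest.funsuite.AnyFunSuite

    class ConfigVersionSketchSuite extends AnyFunSuite {
      test("Worker configs carry the versions from the table above") {
        assert(Worker.WORKER_TIMEOUT.version === "0.6.2")
        assert(Worker.WORKER_CLEANUP_ENABLED.version === "1.0.0")
        assert(Worker.WORKER_UI_RETAINED_DRIVERS.version === "1.5.0")
      }
    }
    ```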
    
    Why are the changes needed?
    Supplemental configuration version information.

    Does this PR introduce any user-facing change?
    No.

    How was this patch tested?
    Existing unit tests.
    
    Closes #27783 from beliefer/add-version-to-tests-config.
    
    Authored-by: beliefer <be...@163.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 .../scala/org/apache/spark/internal/config/Tests.scala    |  8 ++++++++
 .../scala/org/apache/spark/internal/config/Worker.scala   | 15 +++++++++++++--
 docs/configuration.md                                     |  4 +++-
 docs/spark-standalone.md                                  | 10 ++++++++--
 4 files changed, 32 insertions(+), 5 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
index 21660ab..232264d6 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Tests.scala
@@ -22,35 +22,43 @@ private[spark] object Tests {
   val TEST_USE_COMPRESSED_OOPS_KEY = "spark.test.useCompressedOops"
 
   val TEST_MEMORY = ConfigBuilder("spark.testing.memory")
+    .version("1.6.0")
     .longConf
     .createWithDefault(Runtime.getRuntime.maxMemory)
 
   val TEST_SCHEDULE_INTERVAL =
     ConfigBuilder("spark.testing.dynamicAllocation.scheduleInterval")
+      .version("2.3.0")
       .longConf
       .createWithDefault(100)
 
   val IS_TESTING = ConfigBuilder("spark.testing")
+    .version("1.0.1")
     .booleanConf
     .createOptional
 
   val TEST_NO_STAGE_RETRY = ConfigBuilder("spark.test.noStageRetry")
+    .version("1.2.0")
     .booleanConf
     .createWithDefault(false)
 
   val TEST_RESERVED_MEMORY = ConfigBuilder("spark.testing.reservedMemory")
+    .version("1.6.0")
     .longConf
     .createOptional
 
   val TEST_N_HOSTS = ConfigBuilder("spark.testing.nHosts")
+    .version("3.0.0")
     .intConf
     .createWithDefault(5)
 
   val TEST_N_EXECUTORS_HOST = ConfigBuilder("spark.testing.nExecutorsPerHost")
+    .version("3.0.0")
     .intConf
     .createWithDefault(4)
 
   val TEST_N_CORES_EXECUTOR = ConfigBuilder("spark.testing.nCoresPerExecutor")
+    .version("3.0.0")
     .intConf
     .createWithDefault(2)
 }
diff --git a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
index f1eaae2..a807271 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/Worker.scala
@@ -28,47 +28,58 @@ private[spark] object Worker {
     .doc("Path to a file containing the resources allocated to the worker. " +
       "The file should be formatted as a JSON array of ResourceAllocation objects. " +
       "Only used internally in standalone mode.")
+    .version("3.0.0")
     .stringConf
     .createOptional
 
   val WORKER_TIMEOUT = ConfigBuilder("spark.worker.timeout")
+    .version("0.6.2")
     .longConf
     .createWithDefault(60)
 
   val WORKER_DRIVER_TERMINATE_TIMEOUT = ConfigBuilder("spark.worker.driverTerminateTimeout")
+    .version("2.1.2")
     .timeConf(TimeUnit.MILLISECONDS)
     .createWithDefaultString("10s")
 
   val WORKER_CLEANUP_ENABLED = ConfigBuilder("spark.worker.cleanup.enabled")
+    .version("1.0.0")
     .booleanConf
     .createWithDefault(false)
 
   val WORKER_CLEANUP_INTERVAL = ConfigBuilder("spark.worker.cleanup.interval")
+    .version("1.0.0")
     .longConf
     .createWithDefault(60 * 30)
 
   val APP_DATA_RETENTION = ConfigBuilder("spark.worker.cleanup.appDataTtl")
+    .version("1.0.0")
     .longConf
     .createWithDefault(7 * 24 * 3600)
 
   val PREFER_CONFIGURED_MASTER_ADDRESS = ConfigBuilder("spark.worker.preferConfiguredMasterAddress")
+    .version("2.2.1")
     .booleanConf
     .createWithDefault(false)
 
   val WORKER_UI_PORT = ConfigBuilder("spark.worker.ui.port")
+    .version("1.1.0")
     .intConf
     .createOptional
 
   val WORKER_UI_RETAINED_EXECUTORS = ConfigBuilder("spark.worker.ui.retainedExecutors")
+    .version("1.5.0")
     .intConf
     .createWithDefault(1000)
 
   val WORKER_UI_RETAINED_DRIVERS = ConfigBuilder("spark.worker.ui.retainedDrivers")
+    .version("1.5.0")
     .intConf
     .createWithDefault(1000)
 
   val UNCOMPRESSED_LOG_FILE_LENGTH_CACHE_SIZE_CONF =
     ConfigBuilder("spark.worker.ui.compressedLogFileLengthCacheSize")
-    .intConf
-    .createWithDefault(100)
+      .version("2.0.2")
+      .intConf
+      .createWithDefault(100)
 }
diff --git a/docs/configuration.md b/docs/configuration.md
index 9cbe341..b336289 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -929,7 +929,7 @@ Apart from these, the following properties are also available, and may be useful
 ### Spark UI
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.eventLog.logBlockUpdates.enabled</code></td>
   <td>false</td>
@@ -1153,6 +1153,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     How many finished executors the Spark UI and status APIs remember before garbage collecting.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.worker.ui.retainedDrivers</code></td>
@@ -1160,6 +1161,7 @@ Apart from these, the following properties are also available, and may be useful
   <td>
     How many finished drivers the Spark UI and status APIs remember before garbage collecting.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.sql.ui.retainedExecutions</code></td>
diff --git a/docs/spark-standalone.md b/docs/spark-standalone.md
index 17b6772..4d4b85e 100644
--- a/docs/spark-standalone.md
+++ b/docs/spark-standalone.md
@@ -185,7 +185,7 @@ You can optionally configure the cluster further by setting environment variable
 SPARK_MASTER_OPTS supports the following system properties:
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.deploy.retainedApplications</code></td>
   <td>200</td>
@@ -242,6 +242,7 @@ SPARK_MASTER_OPTS supports the following system properties:
     Number of seconds after which the standalone deploy master considers a worker lost if it
     receives no heartbeats.
   </td>
+  <td>0.6.2</td>
 </tr>
 <tr>
   <td><code>spark.worker.resource.{resourceName}.amount</code></td>
@@ -269,13 +270,14 @@ SPARK_MASTER_OPTS supports the following system properties:
     find that resource. If the discovery script also does not find the resources, the worker will fail
     to start up.
   </td>
+  <td>3.0.0</td>
 </tr>
 </table>
 
 SPARK_WORKER_OPTS supports the following system properties:
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.worker.cleanup.enabled</code></td>
   <td>false</td>
@@ -284,6 +286,7 @@ SPARK_WORKER_OPTS supports the following system properties:
     mode, as YARN works differently. Only the directories of stopped applications are cleaned up.
     This should be enabled if spark.shuffle.service.db.enabled is "true"
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.worker.cleanup.interval</code></td>
@@ -292,6 +295,7 @@ SPARK_WORKER_OPTS supports the following system properties:
     Controls the interval, in seconds, at which the worker cleans up old application work dirs
     on the local machine.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.worker.cleanup.appDataTtl</code></td>
@@ -302,6 +306,7 @@ SPARK_WORKER_OPTS supports the following system properties:
     downloaded to each application work dir.  Over time, the work dirs can quickly fill up disk space,
     especially if you run jobs very frequently.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.shuffle.service.db.enabled</code></td>
@@ -333,6 +338,7 @@ SPARK_WORKER_OPTS supports the following system properties:
     Spark caches the uncompressed file size of compressed log files. This property controls the cache
     size.
   </td>
+  <td>2.0.2</td>
 </tr>
 </table>
 

