Posted to commits@spark.apache.org by gu...@apache.org on 2020/04/07 05:50:28 UTC

[spark] branch branch-3.0 updated (ad8c537 -> aedaa62)

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git.


    from ad8c537  Revert "[SPARK-31073][DOC][FOLLOWUP] Add description for Shuffle Write Time metric in StagePage to web-ui.md"
     new 87d41c1  [SPARK-31092][YARN][DOC] Add version information to the configuration of Yarn
     new d8b86fd  [SPARK-31109][MESOS][DOC] Add version information to the configuration of Mesos
     new aedaa62  [SPARK-31118][K8S][DOC] Add version information to the configuration of K8S

The 3 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 docs/running-on-kubernetes.md                      | 77 +++++++++++++++++++++-
 docs/running-on-mesos.md                           | 47 +++++++++++--
 docs/running-on-yarn.md                            | 64 +++++++++++++++---
 .../scala/org/apache/spark/deploy/k8s/Config.scala | 48 +++++++++++++-
 .../org/apache/spark/deploy/mesos/config.scala     | 52 +++++++++++++--
 .../org/apache/spark/deploy/yarn/config.scala      | 56 +++++++++++++++-
 6 files changed, 317 insertions(+), 27 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


[spark] 01/03: [SPARK-31092][YARN][DOC] Add version information to the configuration of Yarn

Posted by gu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit 87d41c11cb1aaaeb903ac80c3fd0baf1159c3676
Author: beliefer <be...@163.com>
AuthorDate: Thu Mar 12 09:52:57 2020 +0900

    [SPARK-31092][YARN][DOC] Add version information to the configuration of Yarn
    
    ### What changes were proposed in this pull request?
    Add version information to the configuration of `Yarn`.
    
    I sorted out the relevant information, shown in the table below; a sketch of the config.scala pattern follows the table.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.yarn.tags | 1.5.0 | SPARK-9782 | 9b731fad2b43ca18f3c5274062d4c7bc2622ab72#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.priority | 3.0.0 | SPARK-29603 | 4615769736f4c052ae1a2de26e715e229154cd2f#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.am.attemptFailuresValidityInterval | 1.6.0 | SPARK-10739 | f97e9323b526b3d0b0fee0ca03f4276f37bb5750#diff-b050df3f55b82065803d6e83453b9706 |
    spark.yarn.executor.failuresValidityInterval | 2.0.0 | SPARK-6735 | 8b44bd52fa40c0fc7d34798c3654e31533fd3008#diff-14b8ed2ef4e3da985300b8d796a38fa9 |
    spark.yarn.maxAppAttempts | 1.3.0 | SPARK-2165 | 8fdd48959c93b9cf809f03549e2ae6c4687d1fcd#diff-b050df3f55b82065803d6e83453b9706 |
    spark.yarn.user.classpath.first | 1.3.0 | SPARK-5087 | 8d45834debc6986e61831d0d6e982d5528dccc51#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.config.gatewayPath | 1.5.0 | SPARK-8302 | 37bf76a2de2143ec6348a3d43b782227849520cc#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.config.replacementPath | 1.5.0 | SPARK-8302 | 37bf76a2de2143ec6348a3d43b782227849520cc#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.queue | 1.0.0 | SPARK-1126 | 1617816090e7b20124a512a43860a21232ebf511#diff-ae6a41a938a767e5bb97b5d738371a5b |  
    spark.yarn.historyServer.address | 1.0.0 | SPARK-1408 | 0058b5d2c74147d24b127a5432f89ebc7050dc18#diff-923ae58523a12397f74dd590744b8b41 |  
    spark.yarn.historyServer.allowTracking | 2.2.0 | SPARK-19554 | 4661d30b988bf773ab45a15b143efb2908d33743#diff-4804e0f83ca7f891183eb0db229b4b9a |
    spark.yarn.archive | 2.0.0 | SPARK-13577 | 07f1c5447753a3d593cd6ececfcb03c11b1cf8ff#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.jars | 2.0.0 | SPARK-13577 | 07f1c5447753a3d593cd6ececfcb03c11b1cf8ff#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.dist.archives | 1.0.0 | SPARK-1126 | 1617816090e7b20124a512a43860a21232ebf511#diff-ae6a41a938a767e5bb97b5d738371a5b |  
    spark.yarn.dist.files | 1.0.0 | SPARK-1126 | 1617816090e7b20124a512a43860a21232ebf511#diff-ae6a41a938a767e5bb97b5d738371a5b |  
    spark.yarn.dist.jars | 2.0.0 | SPARK-12343 | 8ba2b7f28fee39c4839e5ea125bd25f5091a3a1e#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.preserve.staging.files | 1.1.0 | SPARK-2933 | b92d823ad13f6fcc325eeb99563bea543871c6aa#diff-85a1f4b2810b3e11b8434dcefac5bb85 |  
    spark.yarn.submit.file.replication | 0.8.1 | None | 4668fcb9ff8f9c176c4866480d52dde5d67c8522#diff-b050df3f55b82065803d6e83453b9706 |
    spark.yarn.submit.waitAppCompletion | 1.4.0 | SPARK-3591 | b65bad65c3500475b974ca0219f218eef296db2c#diff-b050df3f55b82065803d6e83453b9706 |
    spark.yarn.report.interval | 0.9.0 | None | ebdfa6bb9766209bc5a3c4241fa47141c5e9c5cb#diff-e0a7ae95b6d8e04a67ebca0945d27b65 |  
    spark.yarn.clientLaunchMonitorInterval | 2.3.0 | SPARK-16019 | 1cad31f00644d899d8e74d58c6eb4e9f72065473#diff-4804e0f83ca7f891183eb0db229b4b9a |
    spark.yarn.am.waitTime | 1.3.0 | SPARK-3779 | 253b72b56fe908bbab5d621eae8a5f359c639dfd#diff-87125050a2e2eaf87ea83aac9c19b200 |  
    spark.yarn.metrics.namespace | 2.4.0 | SPARK-24594 | d2436a85294a178398525c37833dae79d45c1452#diff-4804e0f83ca7f891183eb0db229b4b9a |
    spark.yarn.am.nodeLabelExpression | 1.6.0 | SPARK-7173 | 7db3610327d0725ec2ad378bc873b127a59bb87a#diff-b050df3f55b82065803d6e83453b9706 |
    spark.yarn.containerLauncherMaxThreads | 1.2.0 | SPARK-1713 | 1f4a648d4e30e837d6cf3ea8de1808e2254ad70b#diff-801a04f9e67321f3203399f7f59234c1 |  
    spark.yarn.max.executor.failures | 1.0.0 | SPARK-1183 | 698373211ef3cdf841c82d48168cd5dbe00a57b4#diff-0c239e58b37779967e0841fb42f3415a |  
    spark.yarn.scheduler.reporterThread.maxFailures | 1.2.0 | SPARK-3304 | 11c10df825419372df61a8d23c51e8c3cc78047f#diff-85a1f4b2810b3e11b8434dcefac5bb85 |  
    spark.yarn.scheduler.heartbeat.interval-ms | 0.8.1 | None | ee22be0e6c302fb2cdb24f83365c2b8a43a1baab#diff-87125050a2e2eaf87ea83aac9c19b200 |  
    spark.yarn.scheduler.initial-allocation.interval | 1.4.0 | SPARK-7533 | 3ddf051ee7256f642f8a17768d161c7b5f55c7e1#diff-87125050a2e2eaf87ea83aac9c19b200 |  
    spark.yarn.am.finalMessageLimit | 2.4.0 | SPARK-25174 | f8346d2fc01f1e881e4e3f9c4499bf5f9e3ceb3f#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.am.cores | 1.3.0 | SPARK-1507 | 2be82b1e66cd188456bbf1e5abb13af04d1629d5#diff-746d34aa06bfa57adb9289011e725472 |  
    spark.yarn.am.extraJavaOptions | 1.3.0 | SPARK-5087 | 8d45834debc6986e61831d0d6e982d5528dccc51#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.am.extraLibraryPath | 1.4.0 | SPARK-7281 | 7b5dd3e3c0030087eea5a8224789352c03717c1d#diff-b050df3f55b82065803d6e83453b9706 |  
    spark.yarn.am.memoryOverhead | 1.3.0 | SPARK-1953 | e96645206006a009e5c1a23bbd177dcaf3ef9b83#diff-746d34aa06bfa57adb9289011e725472 |  
    spark.yarn.am.memory | 1.3.0 | SPARK-1953 | e96645206006a009e5c1a23bbd177dcaf3ef9b83#diff-746d34aa06bfa57adb9289011e725472 |  
    spark.driver.appUIAddress | 1.1.0 | SPARK-1291 | 72ea56da8e383c61c6f18eeefef03b9af00f5158#diff-2b4617e158e9c5999733759550440b96 |  
    spark.yarn.executor.nodeLabelExpression | 1.4.0 | SPARK-6470 | 82fee9d9aad2c9ba2fb4bd658579fe99218cafac#diff-d4620cf162e045960d84c88b2e0aa428 |  
    spark.yarn.unmanagedAM.enabled | 3.0.0 | SPARK-22404 | f06bc0cd1dee2a58e04ebf24bf719a2f7ef2dc4e#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.rolledLog.includePattern | 2.0.0 | SPARK-15990 | 272a2f78f3ff801b94a81fa8fcc6633190eaa2f4#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.rolledLog.excludePattern | 2.0.0 | SPARK-15990 | 272a2f78f3ff801b94a81fa8fcc6633190eaa2f4#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.user.jar | 1.1.0 | SPARK-1395 | e380767de344fd6898429de43da592658fd86a39#diff-50e237ea17ce94c3ccfc44143518a5f7 |  
    spark.yarn.secondary.jars | 0.9.2 | SPARK-1870 | 1d3aab96120c6770399e78a72b5692cf8f61a144#diff-50b743cff4885220c828b16c44eeecfd |  
    spark.yarn.cache.filenames | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.cache.sizes | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.cache.timestamps | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.cache.visibilities | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.cache.types | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.cache.confArchive | 2.0.0 | SPARK-14602 | f47dbf27fa034629fab12d0f3c89ab75edb03f86#diff-14b8ed2ef4e3da985300b8d796a38fa9 |  
    spark.yarn.blacklist.executor.launch.blacklisting.enabled | 2.4.0 | SPARK-16630 | b56e9c613fb345472da3db1a567ee129621f6bf3#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.exclude.nodes | 3.0.0 | SPARK-26688 | caceaec93203edaea1d521b88e82ef67094cdea9#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    The following appear in the documentation |   |   |   |  
    spark.yarn.am.resource.{resource-type}.amount | 3.0.0 | SPARK-20327 | 3946de773498621f88009c309254b019848ed490#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.driver.resource.{resource-type}.amount | 3.0.0 | SPARK-20327 | 3946de773498621f88009c309254b019848ed490#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.executor.resource.{resource-type}.amount | 3.0.0 | SPARK-20327 | 3946de773498621f88009c309254b019848ed490#diff-4804e0f83ca7f891183eb0db229b4b9a |  
    spark.yarn.appMasterEnv.[EnvironmentVariableName] | 1.1.0 | SPARK-1680 | 7b798e10e214cd407d3399e2cab9e3789f9a929e#diff-50e237ea17ce94c3ccfc44143518a5f7 |  
    spark.yarn.kerberos.relogin.period | 2.3.0 | SPARK-22290 | dc2714da50ecba1bf1fdf555a82a4314f763a76e#diff-4804e0f83ca7f891183eb0db229b4b9a |  
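
    The underlying change in `config.scala` follows a single pattern: every `ConfigBuilder` entry gains a `.version(...)` call recording the release that introduced the key, matching the table above. A minimal sketch of that pattern, taken from the first entry in the diff below (it only compiles inside the Spark source tree, since `ConfigBuilder` is `private[spark]`):
    
    ```scala
    package org.apache.spark.deploy.yarn
    
    import org.apache.spark.internal.config.ConfigBuilder
    
    package object config {
      // Each entry records the Spark release that introduced the key via .version(...);
      // "1.5.0" here corresponds to spark.yarn.tags in the table above (SPARK-9782).
      private[spark] val APPLICATION_TAGS = ConfigBuilder("spark.yarn.tags")
        .doc("Comma-separated list of strings to pass through as YARN application tags " +
          "appearing in YARN Application Reports, which can be used for filtering when " +
          "querying YARN.")
        .version("1.5.0")
        .stringConf
        .toSequence
        .createOptional
    }
    ```
    
    The docs change mirrors this: each row in the property tables of running-on-yarn.md gets an extra "Since Version" cell with the same value.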
    
    ### Why are the changes needed?
    Supplements the configuration with version information.
    
    ### Does this PR introduce any user-facing change?
    'No'.
    
    ### How was this patch tested?
    Existing unit tests.
    
    Closes #27856 from beliefer/add-version-to-yarn-config.
    
    Authored-by: beliefer <be...@163.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 docs/running-on-yarn.md                            | 64 ++++++++++++++++++----
 .../org/apache/spark/deploy/yarn/config.scala      | 56 ++++++++++++++++++-
 2 files changed, 107 insertions(+), 13 deletions(-)

diff --git a/docs/running-on-yarn.md b/docs/running-on-yarn.md
index ab4e963..eec73e8 100644
--- a/docs/running-on-yarn.md
+++ b/docs/running-on-yarn.md
@@ -130,7 +130,7 @@ To use a custom metrics.properties for the application master and executors, upd
 #### Spark Properties
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.yarn.am.memory</code></td>
   <td><code>512m</code></td>
@@ -140,6 +140,7 @@ To use a custom metrics.properties for the application master and executors, upd
     <p/>
     Use lower-case suffixes, e.g. <code>k</code>, <code>m</code>, <code>g</code>, <code>t</code>, and <code>p</code>, for kibi-, mebi-, gibi-, tebi-, and pebibytes, respectively.
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.resource.{resource-type}.amount</code></td>
@@ -153,6 +154,7 @@ To use a custom metrics.properties for the application master and executors, upd
     Example: 
     To request GPU resources from YARN, use: <code>spark.yarn.am.resource.yarn.io/gpu.amount</code>
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.driver.resource.{resource-type}.amount</code></td>
@@ -165,18 +167,20 @@ To use a custom metrics.properties for the application master and executors, upd
     Example: 
     To request GPU resources from YARN, use: <code>spark.yarn.driver.resource.yarn.io/gpu.amount</code>
   </td>
+  <td>3.0.0</td> 
 </tr>
 <tr>
   <td><code>spark.yarn.executor.resource.{resource-type}.amount</code></td>
   <td><code>(none)</code></td>
- <td>
-     Amount of resource to use per executor process.
-     Please note that this feature can be used only with YARN 3.0+
-     For reference, see YARN Resource Model documentation: https://hadoop.apache.org/docs/r3.0.1/hadoop-yarn/hadoop-yarn-site/ResourceModel.html
-     <p/>
-     Example: 
-     To request GPU resources from YARN, use: <code>spark.yarn.executor.resource.yarn.io/gpu.amount</code>
- </td>
+  <td>
+    Amount of resource to use per executor process.
+    Please note that this feature can be used only with YARN 3.0+
+    For reference, see YARN Resource Model documentation: https://hadoop.apache.org/docs/r3.0.1/hadoop-yarn/hadoop-yarn-site/ResourceModel.html
+    <p/>
+    Example: 
+    To request GPU resources from YARN, use: <code>spark.yarn.executor.resource.yarn.io/gpu.amount</code>
+  </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.cores</code></td>
@@ -185,6 +189,7 @@ To use a custom metrics.properties for the application master and executors, upd
     Number of cores to use for the YARN Application Master in client mode.
     In cluster mode, use <code>spark.driver.cores</code> instead.
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.waitTime</code></td>
@@ -193,6 +198,7 @@ To use a custom metrics.properties for the application master and executors, upd
     Only used in <code>cluster</code> mode. Time for the YARN Application Master to wait for the
     SparkContext to be initialized.
   </td>
+ <td>1.3.0</td> 
 </tr>
 <tr>
   <td><code>spark.yarn.submit.file.replication</code></td>
@@ -200,6 +206,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     HDFS replication level for the files uploaded into HDFS for the application. These include things like the Spark jar, the app jar, and any distributed cache files/archives.
   </td>
+  <td>0.8.1</td>
 </tr>
 <tr>
   <td><code>spark.yarn.stagingDir</code></td>
@@ -207,6 +214,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Staging directory used while submitting applications.
   </td>
+ <td>2.0.0</td> 
 </tr>
 <tr>
   <td><code>spark.yarn.preserve.staging.files</code></td>
@@ -214,6 +222,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Set to <code>true</code> to preserve the staged files (Spark jar, app jar, distributed cache files) at the end of the job rather than delete them.
   </td>
+  <td>1.1.0</td> 
 </tr>
 <tr>
   <td><code>spark.yarn.scheduler.heartbeat.interval-ms</code></td>
@@ -223,6 +232,7 @@ To use a custom metrics.properties for the application master and executors, upd
     The value is capped at half the value of YARN's configuration for the expiry interval, i.e.
     <code>yarn.am.liveness-monitor.expiry-interval-ms</code>.
   </td>
+  <td>0.8.1</td>
 </tr>
 <tr>
   <td><code>spark.yarn.scheduler.initial-allocation.interval</code></td>
@@ -234,6 +244,7 @@ To use a custom metrics.properties for the application master and executors, upd
     successive eager heartbeats if pending containers still exist, until
     <code>spark.yarn.scheduler.heartbeat.interval-ms</code> is reached.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.max.executor.failures</code></td>
@@ -241,6 +252,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     The maximum number of executor failures before failing the application.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.historyServer.address</code></td>
@@ -249,6 +261,7 @@ To use a custom metrics.properties for the application master and executors, upd
     The address of the Spark history server, e.g. <code>host.com:18080</code>. The address should not contain a scheme (<code>http://</code>). Defaults to not being set since the history server is an optional service. This address is given to the YARN ResourceManager when the Spark application finishes to link the application from the ResourceManager UI to the Spark history server UI.
     For this property, YARN properties can be used as variables, and these are substituted by Spark at runtime. For example, if the Spark history server runs on the same node as the YARN ResourceManager, it can be set to <code>${hadoopconf-yarn.resourcemanager.hostname}:18080</code>.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.dist.archives</code></td>
@@ -256,6 +269,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Comma separated list of archives to be extracted into the working directory of each executor.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.dist.files</code></td>
@@ -263,6 +277,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Comma-separated list of files to be placed in the working directory of each executor.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.dist.jars</code></td>
@@ -270,6 +285,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Comma-separated list of jars to be placed in the working directory of each executor.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.dist.forceDownloadSchemes</code></td>
@@ -280,6 +296,7 @@ To use a custom metrics.properties for the application master and executors, upd
     support schemes that are supported by Spark, like http, https and ftp, or jars required to be in the
     local YARN client's classpath. Wildcard '*' is denoted to download resources for all the schemes.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
  <td><code>spark.executor.instances</code></td>
@@ -287,6 +304,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     The number of executors for static allocation. With <code>spark.dynamicAllocation.enabled</code>, the initial set of executors will be at least this large.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.memoryOverhead</code></td>
@@ -294,6 +312,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Same as <code>spark.driver.memoryOverhead</code>, but for the YARN Application Master in client mode.
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.queue</code></td>
@@ -301,6 +320,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     The name of the YARN queue to which the application is submitted.
   </td>
+  <td>1.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.jars</code></td>
@@ -312,6 +332,7 @@ To use a custom metrics.properties for the application master and executors, upd
     need to be distributed each time an application runs. To point to jars on HDFS, for example,
     set this configuration to <code>hdfs:///some/path</code>. Globs are allowed.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.archive</code></td>
@@ -323,6 +344,7 @@ To use a custom metrics.properties for the application master and executors, upd
     Like with the previous option, the archive can also be hosted on HDFS to speed up file
     distribution.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.appMasterEnv.[EnvironmentVariableName]</code></td>
@@ -334,6 +356,7 @@ To use a custom metrics.properties for the application master and executors, upd
      the environment of the Spark driver and in <code>client</code> mode it only controls
      the environment of the executor launcher.
   </td>
+  <td>1.1.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.containerLauncherMaxThreads</code></td>
@@ -341,6 +364,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     The maximum number of threads to use in the YARN Application Master for launching executor containers.
   </td>
+  <td>1.2.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.extraJavaOptions</code></td>
@@ -351,6 +375,7 @@ To use a custom metrics.properties for the application master and executors, upd
   to set maximum heap size (-Xmx) settings with this option. Maximum heap size settings can be set
   with <code>spark.yarn.am.memory</code>
   </td>
+  <td>1.3.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.extraLibraryPath</code></td>
@@ -358,6 +383,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
     Set a special library path to use when launching the YARN Application Master in client mode.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.maxAppAttempts</code></td>
@@ -366,6 +392,7 @@ To use a custom metrics.properties for the application master and executors, upd
   The maximum number of attempts that will be made to submit the application.
   It should be no larger than the global number of max attempts in the YARN configuration.
   </td>
+  <td>1.3.0</td> 
 </tr>
 <tr>
   <td><code>spark.yarn.am.attemptFailuresValidityInterval</code></td>
@@ -375,6 +402,7 @@ To use a custom metrics.properties for the application master and executors, upd
   If the AM has been running for at least the defined interval, the AM failure count will be reset.
   This feature is not enabled if not configured.
   </td>
+  <td>1.6.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.executor.failuresValidityInterval</code></td>
@@ -383,6 +411,7 @@ To use a custom metrics.properties for the application master and executors, upd
   Defines the validity interval for executor failure tracking.
   Executor failures which are older than the validity interval will be ignored.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.submit.waitAppCompletion</code></td>
@@ -392,6 +421,7 @@ To use a custom metrics.properties for the application master and executors, upd
   If set to <code>true</code>, the client process will stay alive reporting the application's status.
   Otherwise, the client process will exit after submission.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.am.nodeLabelExpression</code></td>
@@ -401,6 +431,7 @@ To use a custom metrics.properties for the application master and executors, upd
   Only versions of YARN greater than or equal to 2.6 support node label expressions, so when
   running against earlier versions, this property will be ignored.
   </td>
+  <td>1.6.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.executor.nodeLabelExpression</code></td>
@@ -410,6 +441,7 @@ To use a custom metrics.properties for the application master and executors, upd
   Only versions of YARN greater than or equal to 2.6 support node label expressions, so when
   running against earlier versions, this property will be ignored.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.tags</code></td>
@@ -418,6 +450,7 @@ To use a custom metrics.properties for the application master and executors, upd
   Comma-separated list of strings to pass through as YARN application tags appearing
   in YARN ApplicationReports, which can be used for filtering when querying YARN apps.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.priority</code></td>
@@ -427,6 +460,7 @@ To use a custom metrics.properties for the application master and executors, upd
   integer value have a better opportunity to be activated. Currently, YARN only supports application
   priority when using FIFO ordering policy.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.config.gatewayPath</code></td>
@@ -446,6 +480,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <code>$HADOOP_HOME</code> will make sure that paths used to launch remote processes properly
   reference the local YARN configuration.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.config.replacementPath</code></td>
@@ -453,6 +488,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
   See <code>spark.yarn.config.gatewayPath</code>.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.rolledLog.includePattern</code></td>
@@ -467,6 +503,7 @@ To use a custom metrics.properties for the application master and executors, upd
   on the file name configured in the log4j configuration (like spark.log), the user should set the
   regex (spark*) to include all the log files that need to be aggregated.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.rolledLog.excludePattern</code></td>
@@ -476,6 +513,7 @@ To use a custom metrics.properties for the application master and executors, upd
   and those log files will not be aggregated in a rolling fashion. If the log file
   name matches both the include and the exclude pattern, this file will be excluded eventually.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.blacklist.executor.launch.blacklisting.enabled</code></td>
@@ -485,6 +523,7 @@ To use a custom metrics.properties for the application master and executors, upd
   The error limit for blacklisting can be configured by
   <code>spark.blacklist.application.maxFailedExecutorsPerNode</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.exclude.nodes</code></td>
@@ -492,6 +531,7 @@ To use a custom metrics.properties for the application master and executors, upd
   <td>
   Comma-separated list of YARN node names which are excluded from resource allocation.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.metrics.namespace</code></td>
@@ -500,6 +540,7 @@ To use a custom metrics.properties for the application master and executors, upd
   The root namespace for AM metrics reporting. 
   If it is not set then the YARN application ID is used.
   </td>
+  <td>2.4.0</td>
 </tr>
 </table>
 
@@ -583,7 +624,7 @@ staging directory of the Spark application.
 ## YARN-specific Kerberos Configuration
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.kerberos.keytab</code></td>
   <td>(none)</td>
@@ -595,6 +636,7 @@ staging directory of the Spark application.
 
   <br /> (Works also with the "local" master.)
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kerberos.principal</code></td>
@@ -605,6 +647,7 @@ staging directory of the Spark application.
 
   <br /> (Works also with the "local" master.)
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.yarn.kerberos.relogin.period</code></td>
@@ -614,6 +657,7 @@ staging directory of the Spark application.
   that is shorter than the TGT renewal period (or the TGT lifetime if TGT renewal is not enabled).
   The default value should be enough for most deployments.
   </td>
+  <td>2.3.0</td>
 </tr>
 </table>
 
diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
index 8dfbef2..b3a3570 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config.scala
@@ -29,6 +29,7 @@ package object config {
   private[spark] val APPLICATION_TAGS = ConfigBuilder("spark.yarn.tags")
     .doc("Comma-separated list of strings to pass through as YARN application tags appearing " +
       "in YARN Application Reports, which can be used for filtering when querying YARN.")
+    .version("1.5.0")
     .stringConf
     .toSequence
     .createOptional
@@ -37,6 +38,7 @@ package object config {
     .doc("Application priority for YARN to define pending applications ordering policy, those" +
       " with higher value have a better opportunity to be activated. Currently, YARN only" +
       " supports application priority when using FIFO ordering policy.")
+    .version("3.0.0")
     .intConf
     .createOptional
 
@@ -44,6 +46,7 @@ package object config {
     ConfigBuilder("spark.yarn.am.attemptFailuresValidityInterval")
       .doc("Interval after which AM failures will be considered independent and " +
         "not accumulate towards the attempt count.")
+      .version("1.6.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
@@ -51,36 +54,43 @@ package object config {
     ConfigBuilder("spark.yarn.executor.failuresValidityInterval")
       .doc("Interval after which Executor failures will be considered independent and not " +
         "accumulate towards the attempt count.")
+      .version("2.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val MAX_APP_ATTEMPTS = ConfigBuilder("spark.yarn.maxAppAttempts")
     .doc("Maximum number of AM attempts before failing the app.")
+    .version("1.3.0")
     .intConf
     .createOptional
 
   private[spark] val USER_CLASS_PATH_FIRST = ConfigBuilder("spark.yarn.user.classpath.first")
     .doc("Whether to place user jars in front of Spark's classpath.")
+    .version("1.3.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val GATEWAY_ROOT_PATH = ConfigBuilder("spark.yarn.config.gatewayPath")
     .doc("Root of configuration paths that is present on gateway nodes, and will be replaced " +
       "with the corresponding path in cluster machines.")
+    .version("1.5.0")
     .stringConf
     .createWithDefault(null)
 
   private[spark] val REPLACEMENT_ROOT_PATH = ConfigBuilder("spark.yarn.config.replacementPath")
     .doc(s"Path to use as a replacement for ${GATEWAY_ROOT_PATH.key} when launching processes " +
       "in the YARN cluster.")
+    .version("1.5.0")
     .stringConf
     .createWithDefault(null)
 
   private[spark] val QUEUE_NAME = ConfigBuilder("spark.yarn.queue")
+    .version("1.0.0")
     .stringConf
     .createWithDefault("default")
 
   private[spark] val HISTORY_SERVER_ADDRESS = ConfigBuilder("spark.yarn.historyServer.address")
+    .version("1.0.0")
     .stringConf
     .createOptional
 
@@ -88,6 +98,7 @@ package object config {
     ConfigBuilder("spark.yarn.historyServer.allowTracking")
       .doc("Allow using the History Server URL for the application as the tracking URL for the " +
         "application when the Web UI is not enabled.")
+      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -95,37 +106,44 @@ package object config {
 
   private[spark] val SPARK_ARCHIVE = ConfigBuilder("spark.yarn.archive")
     .doc("Location of archive containing jars files with Spark classes.")
+    .version("2.0.0")
     .stringConf
     .createOptional
 
   private[spark] val SPARK_JARS = ConfigBuilder("spark.yarn.jars")
     .doc("Location of jars containing Spark classes.")
+    .version("2.0.0")
     .stringConf
     .toSequence
     .createOptional
 
   private[spark] val ARCHIVES_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.archives")
+    .version("1.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val FILES_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.files")
+    .version("1.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val JARS_TO_DISTRIBUTE = ConfigBuilder("spark.yarn.dist.jars")
+    .version("2.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val PRESERVE_STAGING_FILES = ConfigBuilder("spark.yarn.preserve.staging.files")
     .doc("Whether to preserve temporary files created by the job in HDFS.")
+    .version("1.1.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val STAGING_FILE_REPLICATION = ConfigBuilder("spark.yarn.submit.file.replication")
     .doc("Replication factor for files uploaded by Spark to HDFS.")
+    .version("0.8.1")
     .intConf
     .createOptional
 
@@ -134,93 +152,111 @@ package object config {
   private[spark] val WAIT_FOR_APP_COMPLETION = ConfigBuilder("spark.yarn.submit.waitAppCompletion")
     .doc("In cluster mode, whether to wait for the application to finish before exiting the " +
       "launcher process.")
+    .version("1.4.0")
     .booleanConf
     .createWithDefault(true)
 
   private[spark] val REPORT_INTERVAL = ConfigBuilder("spark.yarn.report.interval")
     .doc("Interval between reports of the current app status.")
+    .version("0.9.0")
     .timeConf(TimeUnit.MILLISECONDS)
     .createWithDefaultString("1s")
 
   private[spark] val CLIENT_LAUNCH_MONITOR_INTERVAL =
     ConfigBuilder("spark.yarn.clientLaunchMonitorInterval")
       .doc("Interval between requests for status the client mode AM when starting the app.")
+      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("1s")
 
   /* Shared Client-mode AM / Driver configuration. */
 
   private[spark] val AM_MAX_WAIT_TIME = ConfigBuilder("spark.yarn.am.waitTime")
+    .version("1.3.0")
     .timeConf(TimeUnit.MILLISECONDS)
     .createWithDefaultString("100s")
 
   private[spark] val YARN_METRICS_NAMESPACE = ConfigBuilder("spark.yarn.metrics.namespace")
     .doc("The root namespace for AM metrics reporting.")
+    .version("2.4.0")
     .stringConf
     .createOptional
 
   private[spark] val AM_NODE_LABEL_EXPRESSION = ConfigBuilder("spark.yarn.am.nodeLabelExpression")
     .doc("Node label expression for the AM.")
+    .version("1.6.0")
     .stringConf
     .createOptional
 
   private[spark] val CONTAINER_LAUNCH_MAX_THREADS =
     ConfigBuilder("spark.yarn.containerLauncherMaxThreads")
+      .version("1.2.0")
       .intConf
       .createWithDefault(25)
 
   private[spark] val MAX_EXECUTOR_FAILURES = ConfigBuilder("spark.yarn.max.executor.failures")
+    .version("1.0.0")
     .intConf
     .createOptional
 
   private[spark] val MAX_REPORTER_THREAD_FAILURES =
     ConfigBuilder("spark.yarn.scheduler.reporterThread.maxFailures")
+      .version("1.2.0")
       .intConf
       .createWithDefault(5)
 
   private[spark] val RM_HEARTBEAT_INTERVAL =
     ConfigBuilder("spark.yarn.scheduler.heartbeat.interval-ms")
+      .version("0.8.1")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("3s")
 
   private[spark] val INITIAL_HEARTBEAT_INTERVAL =
     ConfigBuilder("spark.yarn.scheduler.initial-allocation.interval")
+      .version("1.4.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("200ms")
 
   private[spark] val AM_FINAL_MSG_LIMIT = ConfigBuilder("spark.yarn.am.finalMessageLimit")
     .doc("The limit size of final diagnostic message for our ApplicationMaster to unregister from" +
       " the ResourceManager.")
+    .version("2.4.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefaultString("1m")
 
   /* Client-mode AM configuration. */
 
   private[spark] val AM_CORES = ConfigBuilder("spark.yarn.am.cores")
+    .version("1.3.0")
     .intConf
     .createWithDefault(1)
 
   private[spark] val AM_JAVA_OPTIONS = ConfigBuilder("spark.yarn.am.extraJavaOptions")
     .doc("Extra Java options for the client-mode AM.")
+    .version("1.3.0")
     .stringConf
     .createOptional
 
   private[spark] val AM_LIBRARY_PATH = ConfigBuilder("spark.yarn.am.extraLibraryPath")
     .doc("Extra native library path for the client-mode AM.")
+    .version("1.4.0")
     .stringConf
     .createOptional
 
   private[spark] val AM_MEMORY_OVERHEAD = ConfigBuilder("spark.yarn.am.memoryOverhead")
+    .version("1.3.0")
     .bytesConf(ByteUnit.MiB)
     .createOptional
 
   private[spark] val AM_MEMORY = ConfigBuilder("spark.yarn.am.memory")
+    .version("1.3.0")
     .bytesConf(ByteUnit.MiB)
     .createWithDefaultString("512m")
 
   /* Driver configuration. */
 
   private[spark] val DRIVER_APP_UI_ADDRESS = ConfigBuilder("spark.driver.appUIAddress")
+    .version("1.1.0")
     .stringConf
     .createOptional
 
@@ -229,6 +265,7 @@ package object config {
   private[spark] val EXECUTOR_NODE_LABEL_EXPRESSION =
     ConfigBuilder("spark.yarn.executor.nodeLabelExpression")
       .doc("Node label expression for executors.")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
@@ -237,6 +274,7 @@ package object config {
   private[spark] val YARN_UNMANAGED_AM = ConfigBuilder("spark.yarn.unmanagedAM.enabled")
     .doc("In client mode, whether to launch the Application Master service as part of the client " +
       "using unmanaged am.")
+    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -246,6 +284,7 @@ package object config {
     ConfigBuilder("spark.yarn.rolledLog.includePattern")
       .doc("Java Regex to filter the log files which match the defined include pattern and those " +
         "log files will be aggregated in a rolling fashion.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -253,6 +292,7 @@ package object config {
     ConfigBuilder("spark.yarn.rolledLog.excludePattern")
       .doc("Java Regex to filter the log files which match the defined exclude pattern and those " +
         "log files will not be aggregated in a rolling fashion.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -261,6 +301,7 @@ package object config {
   // Internal config to propagate the location of the user's jar to the driver/executors
   private[spark] val APP_JAR = ConfigBuilder("spark.yarn.user.jar")
     .internal()
+    .version("1.1.0")
     .stringConf
     .createOptional
 
@@ -268,6 +309,7 @@ package object config {
   // of the executors
   private[spark] val SECONDARY_JARS = ConfigBuilder("spark.yarn.secondary.jars")
     .internal()
+    .version("0.9.2")
     .stringConf
     .toSequence
     .createOptional
@@ -276,24 +318,28 @@ package object config {
 
   private[spark] val CACHED_FILES = ConfigBuilder("spark.yarn.cache.filenames")
     .internal()
+    .version("2.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val CACHED_FILES_SIZES = ConfigBuilder("spark.yarn.cache.sizes")
     .internal()
+    .version("2.0.0")
     .longConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val CACHED_FILES_TIMESTAMPS = ConfigBuilder("spark.yarn.cache.timestamps")
     .internal()
+    .version("2.0.0")
     .longConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val CACHED_FILES_VISIBILITIES = ConfigBuilder("spark.yarn.cache.visibilities")
     .internal()
+    .version("2.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
@@ -301,6 +347,7 @@ package object config {
   // Either "file" or "archive", for each file.
   private[spark] val CACHED_FILES_TYPES = ConfigBuilder("spark.yarn.cache.types")
     .internal()
+    .version("2.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
@@ -308,20 +355,23 @@ package object config {
   // The location of the conf archive in HDFS.
   private[spark] val CACHED_CONF_ARCHIVE = ConfigBuilder("spark.yarn.cache.confArchive")
     .internal()
+    .version("2.0.0")
     .stringConf
     .createOptional
 
   /* YARN allocator-level blacklisting related config entries. */
   private[spark] val YARN_EXECUTOR_LAUNCH_BLACKLIST_ENABLED =
     ConfigBuilder("spark.yarn.blacklist.executor.launch.blacklisting.enabled")
+      .version("2.4.0")
       .booleanConf
       .createWithDefault(false)
 
   /* Initially blacklisted YARN nodes. */
   private[spark] val YARN_EXCLUDE_NODES = ConfigBuilder("spark.yarn.exclude.nodes")
-      .stringConf
-      .toSequence
-      .createWithDefault(Nil)
+    .version("3.0.0")
+    .stringConf
+    .toSequence
+    .createWithDefault(Nil)
 
   private[yarn] val YARN_EXECUTOR_RESOURCE_TYPES_PREFIX = "spark.yarn.executor.resource."
   private[yarn] val YARN_DRIVER_RESOURCE_TYPES_PREFIX = "spark.yarn.driver.resource."


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


[spark] 03/03: [SPARK-31118][K8S][DOC] Add version information to the configuration of K8S

Posted by gu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit aedaa6214144d953f1d8c2ce4fc85ac0f6a6f58a
Author: beliefer <be...@163.com>
AuthorDate: Thu Mar 12 09:54:08 2020 +0900

    [SPARK-31118][K8S][DOC] Add version information to the configuration of K8S
    
    ### What changes were proposed in this pull request?
    Add version information to the configuration of `K8S`.
    
    I sorted out the relevant information, shown in the table below; a sketch of the Config.scala pattern follows the table.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.kubernetes.context | 3.0.0 | SPARK-25887 | c542c247bbfe1214c0bf81076451718a9e8931dc#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.master | 3.0.0 | SPARK-30371 | f14061c6a4729ad419902193aa23575d8f17f597#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.namespace | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.container.image | 2.3.0 | SPARK-22994 | b94debd2b01b87ef1d2a34d48877e38ade0969e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.container.image | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.container.image | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.container.image.pullPolicy | 2.3.0 | SPARK-22807 | fb3636b482be3d0940345b1528c1d5090bbc25e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.container.image.pullSecrets | 2.4.0 | SPARK-23668 | cccaaa14ad775fb981e501452ba2cc06ff5c0f0a#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.submission.requestTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.submission.connectionTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.requestTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.connectionTimeout | 3.0.0 | SPARK-27023 | e9e8bb33ef9ad785473ded168bc85867dad4ee70#diff-6e882d5561424e7e6651eb46f10104b8 |  
    KUBERNETES_AUTH_DRIVER_CONF_PREFIX.serviceAccountName | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 | spark.kubernetes.authenticate.driver
    KUBERNETES_AUTH_EXECUTOR_CONF_PREFIX.serviceAccountName | 3.1.0 | SPARK-30122 | f9f06eee9853ad4b6458ac9d31233e729a1ca226#diff-6e882d5561424e7e6651eb46f10104b8 | spark.kubernetes.authenticate.executor
    spark.kubernetes.driver.limit.cores | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.request.cores | 3.0.0 | SPARK-27754 | 1a8c09334db87b0e938c38cd6b59d326bdcab3c3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.submitInDriver | 2.4.0 | SPARK-22839 | f15906da153f139b698e192ec6f82f078f896f1e#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.limit.cores | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.scheduler.name | 3.0.0 | SPARK-29436 | f800fa383131559c4e841bf062c9775d09190935#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.request.cores | 2.4.0 | SPARK-23285 | fe2b7a4568d65a62da6e6eb00fff05f248b4332c#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.pod.name | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.resourceNamePrefix | 3.0.0 | SPARK-25876 | 6be272b75b4ae3149869e19df193675cc4117763#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.podNamePrefix | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.allocation.batch.size | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.allocation.batch.delay | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.lostCheck.maxAttempts | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.submission.waitAppCompletion | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.report.interval | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.apiPollingInterval | 2.4.0 | SPARK-24248 | 270a9a3cac25f3e799460320d0fc94ccd7ecfaea#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.eventProcessingInterval | 2.4.0 | SPARK-24248 | 270a9a3cac25f3e799460320d0fc94ccd7ecfaea#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.memoryOverheadFactor | 2.4.0 | SPARK-23984 | 1a644afbac35c204f9ad55f86999319a9ab458c6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.pyspark.pythonVersion | 2.4.0 | SPARK-23984 | a791c29bd824adadfb2d85594bc8dad4424df936#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.kerberos.krb5.path | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.kerberos.krb5.configMapName | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.hadoop.configMapName | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.kerberos.tokenSecret.name | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.kerberos.tokenSecret.itemKey | 3.0.0 | SPARK-23257 | 6c9c84ffb9c8d98ee2ece7ba4b010856591d383d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.resource.type | 2.4.1 | SPARK-25021 | 9031c784847353051bc0978f63ef4146ae9095ff#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.local.dirs.tmpfs | 3.0.0 | SPARK-25262 | da6fa3828bb824b65f50122a8a0a0d4741551257#diff-6e882d5561424e7e6651eb46f10104b8 | It exists in branch-3.0, but in pom.xml it is 2.4.0-snapshot
    spark.kubernetes.driver.podTemplateFile | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.podTemplateFile | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.podTemplateContainerName | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.podTemplateContainerName | 3.0.0 | SPARK-24434 | f6cc354d83c2c9a757f9b507aadd4dbdc5825cca#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.deleteOnTermination | 3.0.0 | SPARK-25515 | 0c2935b01def8a5f631851999d9c2d57b63763e6#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.dynamicAllocation.deleteGracePeriod | 3.0.0 | SPARK-28487 | 0343854f54b48b206ca434accec99355011560c2#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.appKillPodDeletionGracePeriod | 3.0.0 | SPARK-24793 | 05168e725d2a17c4164ee5f9aa068801ec2454f4#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.file.upload.path | 3.0.0 | SPARK-23153 | 5e74570c8f5e7dfc1ca1c53c177827c5cea57bf1#diff-6e882d5561424e7e6651eb46f10104b8 |  
    The following appear in the documentation |   |   |   |  
    spark.kubernetes.authenticate.submission.caCertFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.submission.clientKeyFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.submission.clientCertFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.submission.oauthToken | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.submission.oauthTokenFile | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.caCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.clientKeyFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.clientCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.oauthToken | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.oauthTokenFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.mounted.caCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.mounted.clientKeyFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.mounted.clientCertFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.driver.mounted.oauthTokenFile | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.caCertFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.clientKeyFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.clientCertFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.oauthToken | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.authenticate.oauthTokenFile | 2.4.0 | SPARK-23146 | 571a6f0574e50e53cea403624ec3795cd03aa204#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.label.[LabelName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.annotation.[AnnotationName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.label.[LabelName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.annotation.[AnnotationName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.node.selector.[labelKey] | 2.3.0 | SPARK-18278 | e9b2070ab2d04993b1c0c1d6c6aba249e6664c8d#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driverEnv.[EnvironmentVariableName] | 2.3.0 | SPARK-22646 | 3f4060c340d6bac412e8819c4388ccba226efcf3#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.secrets.[SecretName] | 2.3.0 | SPARK-22757 | 171f6ddadc6185ffcc6ad82e5f48952fb49095b2#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.secrets.[SecretName] | 2.3.0 | SPARK-22757 | 171f6ddadc6185ffcc6ad82e5f48952fb49095b2#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.secretKeyRef.[EnvName] | 2.4.0 | SPARK-24232 | 21e1fc7d4aed688d7b685be6ce93f76752159c98#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.secretKeyRef.[EnvName] | 2.4.0 | SPARK-24232 | 21e1fc7d4aed688d7b685be6ce93f76752159c98#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.path | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.subPath | 3.0.0 | SPARK-25960 | 3df307aa515b3564686e75d1b71754bbcaaf2dec#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.readOnly | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].options.[OptionName] | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-b5527f236b253e0d9f5db5164bdb43e9 |  
    spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.path | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.subPath | 3.0.0 | SPARK-25960 | 3df307aa515b3564686e75d1b71754bbcaaf2dec#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.readOnly | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-6e882d5561424e7e6651eb46f10104b8 |  
    spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].options.[OptionName] | 2.4.0 | SPARK-23529 | 5ff1b9ba1983d5601add62aef64a3e87d07050eb#diff-b5527f236b253e0d9f5db5164bdb43e9 |  
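
    The companion change in `Config.scala` applies the same pattern as the YARN commit: each Kubernetes `ConfigBuilder` entry gains a `.version(...)` call matching the table above. A minimal sketch under stated assumptions: the entry name `KUBERNETES_CONTEXT` and the abbreviated doc wording are illustrative, and the real definitions live inside `org.apache.spark.deploy.k8s.Config` in the Spark source tree.
    
    ```scala
    package org.apache.spark.deploy.k8s
    
    import org.apache.spark.internal.config.ConfigBuilder
    
    private[spark] object Config {
      // Same pattern as the YARN change: .version(...) records the release that
      // introduced the key; "3.0.0" corresponds to spark.kubernetes.context above.
      // Default is (none), hence createOptional.
      val KUBERNETES_CONTEXT = ConfigBuilder("spark.kubernetes.context")
        .doc("The context from the user Kubernetes configuration file used for the " +
          "initial auto-configuration of the Kubernetes client library.")
        .version("3.0.0")
        .stringConf
        .createOptional
    }
    ```
    
    As with the YARN commit, running-on-kubernetes.md gains a matching "Since Version" column, shown in the diff below.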
    
    ### Why are the changes needed?
    Supplements the configuration with version information.
    
    ### Does this PR introduce any user-facing change?
    'No'.
    
    ### How was this patch tested?
    Existing unit tests.
    
    Closes #27875 from beliefer/add-version-to-k8s-config.
    
    Authored-by: beliefer <be...@163.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 docs/running-on-kubernetes.md                      | 77 +++++++++++++++++++++-
 .../scala/org/apache/spark/deploy/k8s/Config.scala | 48 +++++++++++++-
 2 files changed, 123 insertions(+), 2 deletions(-)

diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index a941b8c..4f228a5 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -494,7 +494,7 @@ See the [configuration page](configuration.html) for information on Spark config
 #### Spark Properties
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.kubernetes.context</code></td>
   <td><code>(none)</code></td>
@@ -505,6 +505,7 @@ See the [configuration page](configuration.html) for information on Spark config
     auto-configured settings can be overridden by the use of other Spark
     configuration properties e.g. <code>spark.kubernetes.namespace</code>.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.master</code></td>
@@ -512,6 +513,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     The internal Kubernetes master (API server) address to be used for driver to request executors.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.namespace</code></td>
@@ -519,6 +521,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     The namespace that will be used for running the driver and executor pods.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.container.image</code></td>
@@ -529,6 +532,7 @@ See the [configuration page](configuration.html) for information on Spark config
     This configuration is required and must be provided by the user, unless explicit
     images are provided for each different container type.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.container.image</code></td>
@@ -536,6 +540,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Custom container image to use for the driver.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.container.image</code></td>
@@ -543,6 +548,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Custom container image to use for executors.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.container.image.pullPolicy</code></td>
@@ -550,6 +556,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Container image pull policy used when pulling images within Kubernetes.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.container.image.pullSecrets</code></td>
@@ -557,6 +564,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Comma separated list of Kubernetes secrets used to pull images from private image registries.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.allocation.batch.size</code></td>
@@ -564,6 +572,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Number of pods to launch at once in each round of executor pod allocation.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.allocation.batch.delay</code></td>
@@ -572,6 +581,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Time to wait between each round of executor pod allocation. Specifying values less than 1 second may lead to
     excessive CPU usage on the spark driver.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.submission.caCertFile</code></td>
@@ -581,6 +591,7 @@ See the [configuration page](configuration.html) for information on Spark config
     must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not provide
     a scheme). In client mode, use <code>spark.kubernetes.authenticate.caCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.submission.clientKeyFile</code></td>
@@ -590,6 +601,7 @@ See the [configuration page](configuration.html) for information on Spark config
     must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not provide
     a scheme). In client mode, use <code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.submission.clientCertFile</code></td>
@@ -599,6 +611,7 @@ See the [configuration page](configuration.html) for information on Spark config
     file must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not
     provide a scheme). In client mode, use <code>spark.kubernetes.authenticate.clientCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.submission.oauthToken</code></td>
@@ -608,6 +621,7 @@ See the [configuration page](configuration.html) for information on Spark config
     that unlike the other authentication options, this is expected to be the exact string value of the token to use for
     the authentication. In client mode, use <code>spark.kubernetes.authenticate.oauthToken</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.submission.oauthTokenFile</code></td>
@@ -617,6 +631,7 @@ See the [configuration page](configuration.html) for information on Spark config
     This file must be located on the submitting machine's disk. Specify this as a path as opposed to a URI (i.e. do not
     provide a scheme). In client mode, use <code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.caCertFile</code></td>
@@ -627,6 +642,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
     <code>spark.kubernetes.authenticate.caCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.clientKeyFile</code></td>
@@ -637,6 +653,7 @@ See the [configuration page](configuration.html) for information on Spark config
     a Kubernetes secret. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
     In client mode, use <code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.clientCertFile</code></td>
@@ -647,6 +664,7 @@ See the [configuration page](configuration.html) for information on Spark config
     driver pod as a Kubernetes secret. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
     In client mode, use <code>spark.kubernetes.authenticate.clientCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.oauthToken</code></td>
@@ -657,6 +675,7 @@ See the [configuration page](configuration.html) for information on Spark config
     the token to use for the authentication. This token value is uploaded to the driver pod as a Kubernetes secret.
     In client mode, use <code>spark.kubernetes.authenticate.oauthToken</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.oauthTokenFile</code></td>
@@ -667,6 +686,7 @@ See the [configuration page](configuration.html) for information on Spark config
     the token to use for the authentication. This token value is uploaded to the driver pod as a secret. In client mode, use
     <code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.mounted.caCertFile</code></td>
@@ -677,6 +697,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
     <code>spark.kubernetes.authenticate.caCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.mounted.clientKeyFile</code></td>
@@ -687,6 +708,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
     <code>spark.kubernetes.authenticate.clientKeyFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.mounted.clientCertFile</code></td>
@@ -697,6 +719,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify this as a path as opposed to a URI (i.e. do not provide a scheme). In client mode, use
     <code>spark.kubernetes.authenticate.clientCertFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.mounted.oauthTokenFile</code></td>
@@ -707,6 +730,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Note that unlike the other authentication options, this file must contain the exact string value of the token to use
     for the authentication. In client mode, use <code>spark.kubernetes.authenticate.oauthTokenFile</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.driver.serviceAccountName</code></td>
@@ -716,6 +740,7 @@ See the [configuration page](configuration.html) for information on Spark config
     executor pods from the API server. Note that this cannot be specified alongside a CA cert file, client key file,
     client cert file, and/or OAuth token. In client mode, use <code>spark.kubernetes.authenticate.serviceAccountName</code> instead.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.caCertFile</code></td>
@@ -724,6 +749,7 @@ See the [configuration page](configuration.html) for information on Spark config
     In client mode, path to the CA cert file for connecting to the Kubernetes API server over TLS when
     requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.clientKeyFile</code></td>
@@ -732,6 +758,7 @@ See the [configuration page](configuration.html) for information on Spark config
     In client mode, path to the client key file for authenticating against the Kubernetes API server
     when requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.clientCertFile</code></td>
@@ -740,6 +767,7 @@ See the [configuration page](configuration.html) for information on Spark config
     In client mode, path to the client cert file for authenticating against the Kubernetes API server
     when requesting executors. Specify this as a path as opposed to a URI (i.e. do not provide a scheme).
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.oauthToken</code></td>
@@ -749,6 +777,7 @@ See the [configuration page](configuration.html) for information on Spark config
     requesting executors. Note that unlike the other authentication options, this must be the exact string value of
     the token to use for the authentication.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.authenticate.oauthTokenFile</code></td>
@@ -757,6 +786,7 @@ See the [configuration page](configuration.html) for information on Spark config
     In client mode, path to the file containing the OAuth token to use when authenticating against the Kubernetes API
     server when requesting executors.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.label.[LabelName]</code></td>
@@ -767,6 +797,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Note that Spark also adds its own labels to the driver pod
     for bookkeeping purposes.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.annotation.[AnnotationName]</code></td>
@@ -775,6 +806,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Add the annotation specified by <code>AnnotationName</code> to the driver pod.
     For example, <code>spark.kubernetes.driver.annotation.something=true</code>.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.label.[LabelName]</code></td>
@@ -785,6 +817,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Note that Spark also adds its own labels to the executor pod
     for bookkeeping purposes.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.annotation.[AnnotationName]</code></td>
@@ -793,6 +826,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Add the annotation specified by <code>AnnotationName</code> to the executor pods.
     For example, <code>spark.kubernetes.executor.annotation.something=true</code>.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.pod.name</code></td>
@@ -804,6 +838,7 @@ See the [configuration page](configuration.html) for information on Spark config
     value in client mode allows the driver to become the owner of its executor pods, which in turn allows the executor
     pods to be garbage collected by the cluster.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.lostCheck.maxAttempts</code></td>
@@ -813,6 +848,7 @@ See the [configuration page](configuration.html) for information on Spark config
     The loss reason is used to ascertain whether the executor failure is due to a framework or an application error
     which in turn decides whether the executor is removed and replaced, or placed into a failed state for debugging.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.submission.waitAppCompletion</code></td>
@@ -821,6 +857,7 @@ See the [configuration page](configuration.html) for information on Spark config
     In cluster mode, whether to wait for the application to finish before exiting the launcher process.  When changed to
     false, the launcher has a "fire-and-forget" behavior when launching the Spark job.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.report.interval</code></td>
@@ -828,6 +865,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Interval between reports of the current Spark job status in cluster mode.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.request.cores</code></td>
@@ -837,6 +875,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Example values include 0.1, 500m, 1.5, 5, etc., with the definition of cpu units documented in <a href="https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units">CPU units</a>.
     This takes precedence over <code>spark.driver.cores</code> for specifying the driver pod cpu request if set.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.limit.cores</code></td>
@@ -844,6 +883,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Specify a hard cpu <a href="https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container">limit</a> for the driver pod.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.request.cores</code></td>
@@ -854,6 +894,7 @@ See the [configuration page](configuration.html) for information on Spark config
     This is distinct from <code>spark.executor.cores</code>: it is only used and takes precedence over <code>spark.executor.cores</code> for specifying the executor pod cpu request if set. Task
     parallelism, e.g., number of tasks an executor can run concurrently is not affected by this.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.limit.cores</code></td>
@@ -861,6 +902,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Specify a hard cpu <a href="https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container">limit</a> for each executor pod launched for the Spark Application.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.node.selector.[labelKey]</code></td>
@@ -871,6 +913,7 @@ See the [configuration page](configuration.html) for information on Spark config
     will result in the driver pod and executors having a node selector with key <code>identifier</code> and value
      <code>myIdentifier</code>. Multiple node selector keys can be added by setting multiple configurations with this prefix.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driverEnv.[EnvironmentVariableName]</code></td>
@@ -879,6 +922,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Add the environment variable specified by <code>EnvironmentVariableName</code> to
     the Driver process. The user can specify multiple of these to set multiple environment variables.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.secrets.[SecretName]</code></td>
@@ -887,6 +931,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add the <a href="https://kubernetes.io/docs/concepts/configuration/secret/">Kubernetes Secret</a> named <code>SecretName</code> to the driver pod on the path specified in the value. For example,
    <code>spark.kubernetes.driver.secrets.spark-secret=/etc/secrets</code>.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.secrets.[SecretName]</code></td>
@@ -895,6 +940,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add the <a href="https://kubernetes.io/docs/concepts/configuration/secret/">Kubernetes Secret</a> named <code>SecretName</code> to the executor pod on the path specified in the value. For example,
    <code>spark.kubernetes.executor.secrets.spark-secret=/etc/secrets</code>.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.secretKeyRef.[EnvName]</code></td>
@@ -903,6 +949,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add as an environment variable to the driver container with name EnvName (case sensitive), the value referenced by key <code> key </code> in the data of the referenced <a href="https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables">Kubernetes Secret</a>. For example,
    <code>spark.kubernetes.driver.secretKeyRef.ENV_VAR=spark-secret:key</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.secretKeyRef.[EnvName]</code></td>
@@ -911,6 +958,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add as an environment variable to the executor container with name EnvName (case sensitive), the value referenced by key <code> key </code> in the data of the referenced <a href="https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-environment-variables">Kubernetes Secret</a>. For example,
    <code>spark.kubernetes.executor.secrets.ENV_VAR=spark-secret:key</code>.
   </td>
+  <td>2.4.0</td>
 </tr>   
 <tr>
   <td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.path</code></td>
@@ -919,6 +967,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add the <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> named <code>VolumeName</code> of the <code>VolumeType</code> type to the driver pod on the path specified in the value. For example,
    <code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.path=/checkpoint</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.subPath</code></td>
@@ -927,6 +976,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specifies a <a href="https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath">subpath</a> to be mounted from the volume into the driver pod.
    <code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.subPath=checkpoint</code>.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].mount.readOnly</code></td>
@@ -935,6 +985,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify if the mounted volume is read only or not. For example,
    <code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.mount.readOnly=false</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.volumes.[VolumeType].[VolumeName].options.[OptionName]</code></td>
@@ -943,6 +994,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Configure <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> options passed to the Kubernetes with <code>OptionName</code> as key having specified value, must conform with Kubernetes option format. For example,
    <code>spark.kubernetes.driver.volumes.persistentVolumeClaim.checkpointpvc.options.claimName=spark-pvc-claim</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.path</code></td>
@@ -951,6 +1003,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Add the <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> named <code>VolumeName</code> of the <code>VolumeType</code> type to the executor pod on the path specified in the value. For example,
    <code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.path=/checkpoint</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.subPath</code></td>
@@ -959,6 +1012,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specifies a <a href="https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath">subpath</a> to be mounted from the volume into the executor pod.
    <code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.subPath=checkpoint</code>.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].mount.readOnly</code></td>
@@ -967,6 +1021,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify if the mounted volume is read only or not. For example,
    <code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.mount.readOnly=false</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.volumes.[VolumeType].[VolumeName].options.[OptionName]</code></td>
@@ -975,6 +1030,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Configure <a href="https://kubernetes.io/docs/concepts/storage/volumes/">Kubernetes Volume</a> options passed to the Kubernetes with <code>OptionName</code> as key having specified value. For example,
    <code>spark.kubernetes.executor.volumes.persistentVolumeClaim.checkpointpvc.options.claimName=spark-pvc-claim</code>.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.local.dirs.tmpfs</code></td>
@@ -983,6 +1039,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Configure the <code>emptyDir</code> volumes used to back <code>SPARK_LOCAL_DIRS</code> within the Spark driver and executor pods to use <code>tmpfs</code> backing i.e. RAM.  See <a href="#local-storage">Local Storage</a> earlier on this page
    for more discussion of this.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.memoryOverheadFactor</code></td>
@@ -991,6 +1048,7 @@ See the [configuration page](configuration.html) for information on Spark config
     This sets the Memory Overhead Factor that will allocate memory to non-JVM memory, which includes off-heap memory allocations, non-JVM tasks, and various systems processes. For JVM-based jobs this value will default to 0.10 and 0.40 for non-JVM jobs.
    This is done as non-JVM tasks need more non-JVM heap space and such tasks commonly fail with "Memory Overhead Exceeded" errors. This preempts this error with a higher default.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.pyspark.pythonVersion</code></td>
@@ -998,6 +1056,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
    This sets the major Python version of the docker image used to run the driver and executor containers. Can either be 2 or 3.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.kerberos.krb5.path</code></td>
@@ -1006,6 +1065,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify the local location of the krb5.conf file to be mounted on the driver and executors for Kerberos interaction.
    It is important to note that the KDC defined needs to be visible from inside the containers.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.kerberos.krb5.configMapName</code></td>
@@ -1015,6 +1075,7 @@ See the [configuration page](configuration.html) for information on Spark config
    for Kerberos interaction. The KDC defined needs to be visible from inside the containers. The ConfigMap must also
    be in the same namespace of the driver and executor pods.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.hadoop.configMapName</code></td>
@@ -1023,6 +1084,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify the name of the ConfigMap, containing the HADOOP_CONF_DIR files, to be mounted on the driver
     and executors for custom Hadoop configuration.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.kerberos.tokenSecret.name</code></td>
@@ -1031,6 +1093,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify the name of the secret where your existing delegation tokens are stored. This removes the need for the job user
     to provide any kerberos credentials for launching a job.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.kerberos.tokenSecret.itemKey</code></td>
@@ -1039,6 +1102,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Specify the item key of the data where your existing delegation tokens are stored. This removes the need for the job user
     to provide any kerberos credentials for launching a job.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.podTemplateFile</code></td>
@@ -1047,6 +1111,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify the local file that contains the driver <a href="#pod-template">pod template</a>. For example
    <code>spark.kubernetes.driver.podTemplateFile=/path/to/driver-pod-template.yaml</code>
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.podTemplateContainerName</code></td>
@@ -1055,6 +1120,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify the container name to be used as a basis for the driver in the given <a href="#pod-template">pod template</a>.
    For example <code>spark.kubernetes.driver.podTemplateContainerName=spark-driver</code>
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.podTemplateFile</code></td>
@@ -1063,6 +1129,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify the local file that contains the executor <a href="#pod-template">pod template</a>. For example
    <code>spark.kubernetes.executor.podTemplateFile=/path/to/executor-pod-template.yaml</code>
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.podTemplateContainerName</code></td>
@@ -1071,6 +1138,7 @@ See the [configuration page](configuration.html) for information on Spark config
    Specify the container name to be used as a basis for the executor in the given <a href="#pod-template">pod template</a>.
    For example <code>spark.kubernetes.executor.podTemplateContainerName=spark-executor</code>
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.executor.deleteOnTermination</code></td>
@@ -1078,6 +1146,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
   Specify whether executor pods should be deleted in case of failure or normal termination.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.submission.connectionTimeout</code></td>
@@ -1085,6 +1154,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Connection timeout in milliseconds for the kubernetes client to use for starting the driver.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.submission.requestTimeout</code></td>
@@ -1092,6 +1162,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Request timeout in milliseconds for the kubernetes client to use for starting the driver.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.connectionTimeout</code></td>
@@ -1099,6 +1170,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Connection timeout in milliseconds for the kubernetes client in driver to use when requesting executors.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.driver.requestTimeout</code></td>
@@ -1106,6 +1178,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Request timeout in milliseconds for the kubernetes client in driver to use when requesting executors.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>  
   <td><code>spark.kubernetes.appKillPodDeletionGracePeriod</code></td>
@@ -1113,6 +1186,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
   Specify the grace period in seconds when deleting a Spark application using spark-submit.
   </td>
+  <td>3.0.0</td>
 </tr>
 <tr>
   <td><code>spark.kubernetes.file.upload.path</code></td>
@@ -1122,6 +1196,7 @@ See the [configuration page](configuration.html) for information on Spark config
     <code>spark.kubernetes.file.upload.path=s3a://&lt;s3-bucket&gt;/path</code>
    File should be specified as <code>file://path/to/file </code> or absolute path.
   </td>
+  <td>3.0.0</td>
 </tr>
 </table>
 
diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
index b326650..8684a60 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
@@ -33,6 +33,7 @@ private[spark] object Config extends Logging {
         "to .kube/config under your home directory.  If not specified then your current " +
         "context is used.  You can always override specific aspects of the config file " +
         "provided configuration using other Spark on K8S configuration options.")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -40,12 +41,14 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.driver.master")
       .doc("The internal Kubernetes master (API server) address " +
         "to be used for driver to request executors.")
+      .version("3.0.0")
       .stringConf
       .createWithDefault(KUBERNETES_MASTER_INTERNAL_URL)
 
   val KUBERNETES_NAMESPACE =
     ConfigBuilder("spark.kubernetes.namespace")
       .doc("The namespace that will be used for running the driver and executor pods.")
+      .version("2.3.0")
       .stringConf
       .createWithDefault("default")
 
@@ -54,22 +57,26 @@ private[spark] object Config extends Logging {
       .doc("Container image to use for Spark containers. Individual container types " +
         "(e.g. driver or executor) can also be configured to use different images if desired, " +
         "by setting the container type-specific image name.")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
   val DRIVER_CONTAINER_IMAGE =
     ConfigBuilder("spark.kubernetes.driver.container.image")
       .doc("Container image to use for the driver.")
+      .version("2.3.0")
       .fallbackConf(CONTAINER_IMAGE)
 
   val EXECUTOR_CONTAINER_IMAGE =
     ConfigBuilder("spark.kubernetes.executor.container.image")
       .doc("Container image to use for the executors.")
+      .version("2.3.0")
       .fallbackConf(CONTAINER_IMAGE)
 
   val CONTAINER_IMAGE_PULL_POLICY =
     ConfigBuilder("spark.kubernetes.container.image.pullPolicy")
       .doc("Kubernetes image pull policy. Valid values are Always, Never, and IfNotPresent.")
+      .version("2.3.0")
       .stringConf
       .checkValues(Set("Always", "Never", "IfNotPresent"))
       .createWithDefault("IfNotPresent")
@@ -78,6 +85,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.container.image.pullSecrets")
       .doc("Comma separated list of the Kubernetes secrets used " +
         "to access private image registries.")
+      .version("2.4.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
@@ -95,24 +103,28 @@ private[spark] object Config extends Logging {
   val SUBMISSION_CLIENT_REQUEST_TIMEOUT =
     ConfigBuilder("spark.kubernetes.submission.requestTimeout")
       .doc("request timeout to be used in milliseconds for starting the driver")
+      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
   val SUBMISSION_CLIENT_CONNECTION_TIMEOUT =
     ConfigBuilder("spark.kubernetes.submission.connectionTimeout")
       .doc("connection timeout to be used in milliseconds for starting the driver")
+      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
   val DRIVER_CLIENT_REQUEST_TIMEOUT =
     ConfigBuilder("spark.kubernetes.driver.requestTimeout")
       .doc("request timeout to be used in milliseconds for driver to request executors")
+      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
   val DRIVER_CLIENT_CONNECTION_TIMEOUT =
     ConfigBuilder("spark.kubernetes.driver.connectionTimeout")
       .doc("connection timeout to be used in milliseconds for driver to request executors")
+      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
@@ -122,48 +134,56 @@ private[spark] object Config extends Logging {
         "this service account when requesting executor pods from the API server. If specific " +
         "credentials are given for the driver pod to use, the driver will favor " +
         "using those credentials instead.")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_DRIVER_LIMIT_CORES =
     ConfigBuilder("spark.kubernetes.driver.limit.cores")
       .doc("Specify the hard cpu limit for the driver pod")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_DRIVER_REQUEST_CORES =
     ConfigBuilder("spark.kubernetes.driver.request.cores")
       .doc("Specify the cpu request for the driver pod")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_DRIVER_SUBMIT_CHECK =
     ConfigBuilder("spark.kubernetes.submitInDriver")
     .internal()
+    .version("2.4.0")
     .booleanConf
     .createWithDefault(false)
 
   val KUBERNETES_EXECUTOR_LIMIT_CORES =
     ConfigBuilder("spark.kubernetes.executor.limit.cores")
       .doc("Specify the hard cpu limit for each executor pod")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_EXECUTOR_SCHEDULER_NAME =
     ConfigBuilder("spark.kubernetes.executor.scheduler.name")
       .doc("Specify the scheduler name for each executor pod")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_EXECUTOR_REQUEST_CORES =
     ConfigBuilder("spark.kubernetes.executor.request.cores")
       .doc("Specify the cpu request for each executor pod")
+      .version("2.4.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_DRIVER_POD_NAME =
     ConfigBuilder("spark.kubernetes.driver.pod.name")
       .doc("Name of the driver pod.")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
@@ -171,12 +191,14 @@ private[spark] object Config extends Logging {
   val KUBERNETES_DRIVER_POD_NAME_PREFIX =
     ConfigBuilder("spark.kubernetes.driver.resourceNamePrefix")
       .internal()
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_EXECUTOR_POD_NAME_PREFIX =
     ConfigBuilder("spark.kubernetes.executor.podNamePrefix")
       .doc("Prefix to use in front of the executor pod names.")
+      .version("2.3.0")
       .internal()
       .stringConf
       .createOptional
@@ -184,6 +206,7 @@ private[spark] object Config extends Logging {
   val KUBERNETES_ALLOCATION_BATCH_SIZE =
     ConfigBuilder("spark.kubernetes.allocation.batch.size")
       .doc("Number of pods to launch at once in each round of executor allocation.")
+      .version("2.3.0")
       .intConf
       .checkValue(value => value > 0, "Allocation batch size should be a positive integer")
       .createWithDefault(5)
@@ -191,6 +214,7 @@ private[spark] object Config extends Logging {
   val KUBERNETES_ALLOCATION_BATCH_DELAY =
     ConfigBuilder("spark.kubernetes.allocation.batch.delay")
       .doc("Time to wait between each round of executor allocation.")
+      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(value => value > 0, "Allocation batch delay must be a positive time value.")
       .createWithDefaultString("1s")
@@ -199,6 +223,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.executor.lostCheck.maxAttempts")
       .doc("Maximum number of attempts allowed for checking the reason of an executor loss " +
         "before it is assumed that the executor failed.")
+      .version("2.3.0")
       .intConf
       .checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
         "must be a positive integer")
@@ -208,12 +233,14 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.submission.waitAppCompletion")
       .doc("In cluster mode, whether to wait for the application to finish before exiting the " +
         "launcher process.")
+      .version("2.3.0")
       .booleanConf
       .createWithDefault(true)
 
   val REPORT_INTERVAL =
     ConfigBuilder("spark.kubernetes.report.interval")
       .doc("Interval between reports of the current app status in cluster mode.")
+      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(interval => interval > 0, s"Logging interval must be a positive time value.")
       .createWithDefaultString("1s")
@@ -222,6 +249,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.executor.apiPollingInterval")
       .doc("Interval between polls against the Kubernetes API server to inspect the " +
         "state of executors.")
+      .version("2.4.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(interval => interval > 0, s"API server polling interval must be a" +
         " positive time value.")
@@ -231,6 +259,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.executor.eventProcessingInterval")
       .doc("Interval between successive inspection of executor events sent from the" +
         " Kubernetes API.")
+      .version("2.4.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(interval => interval > 0, s"Event processing interval must be a positive" +
         " time value.")
@@ -240,6 +269,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.memoryOverheadFactor")
       .doc("This sets the Memory Overhead Factor that will allocate memory to non-JVM jobs " +
         "which in the case of JVM tasks will default to 0.10 and 0.40 for non-JVM jobs")
+      .version("2.4.0")
       .doubleConf
       .checkValue(mem_overhead => mem_overhead >= 0 && mem_overhead < 1,
         "Ensure that memory overhead is a double between 0 --> 1.0")
@@ -248,6 +278,7 @@ private[spark] object Config extends Logging {
   val PYSPARK_MAJOR_PYTHON_VERSION =
     ConfigBuilder("spark.kubernetes.pyspark.pythonVersion")
       .doc("This sets the major Python version. Either 2 or 3. (Python2 or Python3)")
+      .version("2.4.0")
       .stringConf
       .checkValue(pv => List("2", "3").contains(pv),
         "Ensure that major Python version is either Python2 or Python3")
@@ -258,6 +289,7 @@ private[spark] object Config extends Logging {
       .doc("Specify the local location of the krb5.conf file to be mounted on the driver " +
         "and executors for Kerberos. Note: The KDC defined needs to be " +
         "visible from inside the containers ")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -266,6 +298,7 @@ private[spark] object Config extends Logging {
       .doc("Specify the name of the ConfigMap, containing the krb5.conf file, to be mounted " +
         "on the driver and executors for Kerberos. Note: The KDC defined" +
         "needs to be visible from inside the containers ")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -273,6 +306,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.hadoop.configMapName")
       .doc("Specify the name of the ConfigMap, containing the HADOOP_CONF_DIR files, " +
         "to be mounted on the driver and executors for custom Hadoop configuration.")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -280,6 +314,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.kerberos.tokenSecret.name")
       .doc("Specify the name of the secret where your existing delegation tokens are stored. " +
         "This removes the need for the job user to provide any keytab for launching a job")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -287,13 +322,15 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.kerberos.tokenSecret.itemKey")
       .doc("Specify the item key of the data where your existing delegation tokens are stored. " +
         "This removes the need for the job user to provide any keytab for launching a job")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val APP_RESOURCE_TYPE =
     ConfigBuilder("spark.kubernetes.resource.type")
-      .doc("This sets the resource type internally")
       .internal()
+      .doc("This sets the resource type internally")
+      .version("2.4.1")
       .stringConf
       .checkValues(Set(APP_RESOURCE_TYPE_JAVA, APP_RESOURCE_TYPE_PYTHON, APP_RESOURCE_TYPE_R))
       .createOptional
@@ -304,30 +341,35 @@ private[spark] object Config extends Logging {
         "their medium set to Memory so that they will be created as tmpfs (i.e. RAM) backed " +
         "volumes. This may improve performance but scratch space usage will count towards " +
         "your pods memory limit so you may wish to request more memory.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val KUBERNETES_DRIVER_PODTEMPLATE_FILE =
     ConfigBuilder("spark.kubernetes.driver.podTemplateFile")
       .doc("File containing a template pod spec for the driver")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_EXECUTOR_PODTEMPLATE_FILE =
     ConfigBuilder("spark.kubernetes.executor.podTemplateFile")
       .doc("File containing a template pod spec for executors")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_DRIVER_PODTEMPLATE_CONTAINER_NAME =
     ConfigBuilder("spark.kubernetes.driver.podTemplateContainerName")
       .doc("container name to be used as a basis for the driver in the given pod template")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
   val KUBERNETES_EXECUTOR_PODTEMPLATE_CONTAINER_NAME =
     ConfigBuilder("spark.kubernetes.executor.podTemplateContainerName")
       .doc("container name to be used as a basis for executors in the given pod template")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -340,12 +382,14 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.executor.deleteOnTermination")
       .doc("If set to false then executor pods will not be deleted in case " +
         "of failure or normal termination.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   val KUBERNETES_DYN_ALLOC_KILL_GRACE_PERIOD =
     ConfigBuilder("spark.kubernetes.dynamicAllocation.deleteGracePeriod")
       .doc("How long to wait for executors to shut down gracefully before a forceful kill.")
+      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("5s")
 
@@ -353,6 +397,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.appKillPodDeletionGracePeriod")
       .doc("Time to wait for graceful deletion of Spark pods when spark-submit" +
         " is used for killing an application.")
+      .version("3.0.0")
       .timeConf(TimeUnit.SECONDS)
       .createOptional
 
@@ -360,6 +405,7 @@ private[spark] object Config extends Logging {
     ConfigBuilder("spark.kubernetes.file.upload.path")
      .doc("Hadoop compatible file system path where files from the local file system " +
        "will be uploaded to in cluster mode.")
+      .version("3.0.0")
       .stringConf
       .createOptional
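
For readers who want to exercise the properties documented above, a minimal, hedged sketch of a
Kubernetes submission configured programmatically; the API server address, namespace, image and
service account below are placeholders, not values taken from this patch, and in practice these
keys are usually passed to spark-submit via --conf:

    import org.apache.spark.SparkConf

    object K8sConfSketch {
      def main(args: Array[String]): Unit = {
        // All concrete values here are assumptions -- adjust for a real cluster.
        val conf = new SparkConf()
          .setMaster("k8s://https://kubernetes.example.com:6443")
          .set("spark.kubernetes.namespace", "spark-jobs")
          .set("spark.kubernetes.container.image", "registry.example.com/spark:3.0.0")
          .set("spark.kubernetes.authenticate.driver.serviceAccountName", "spark")
        println(conf.toDebugString)
      }
    }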
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


[spark] 02/03: [SPARK-31109][MESOS][DOC] Add version information to the configuration of Mesos

Posted by gu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit d8b86fd5fb491c3e188fb9b30ca1f4b6a8384188
Author: beliefer <be...@163.com>
AuthorDate: Thu Mar 12 11:02:29 2020 +0900

    [SPARK-31109][MESOS][DOC] Add version information to the configuration of Mesos
    
    ### What changes were proposed in this pull request?
    Add version information to the configuration of `Mesos`.
    
    I sorted out some information, shown below.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.mesos.$taskType.secret.names | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.$taskType.secret.values | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.$taskType.secret.envkeys | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.$taskType.secret.filenames | 2.3.0 | SPARK-22131 | 5415963d2caaf95604211419ffc4e29fff38e1d7#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.principal | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    spark.mesos.principal.file | 2.4.0 | SPARK-16501 | 7f10cf83f311526737fc96d5bb8281d12e41932f#diff-daf48dabbe58afaeed8787751750b01d |  
    spark.mesos.secret | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    spark.mesos.secret.file | 2.4.0 | SPARK-16501 | 7f10cf83f311526737fc96d5bb8281d12e41932f#diff-daf48dabbe58afaeed8787751750b01d |  
    spark.shuffle.cleaner.interval | 2.0.0 | SPARK-12583 | 310981d49a332bd329303f610b150bbe02cf5f87#diff-2fafefee94f2a2023ea9765536870258 |  
    spark.mesos.dispatcher.webui.url | 2.0.0 | SPARK-13492 | a4a0addccffb7cd0ece7947d55ce2538afa54c97#diff-f541460c7a74cee87cbb460b3b01665e |  
    spark.mesos.dispatcher.historyServer.url | 2.1.0 | SPARK-16809 | 62e62124419f3fa07b324f5e42feb2c5b4fde715#diff-3779e2035d9a09fa5f6af903925b9512 |  
    spark.mesos.driver.labels | 2.3.0 | SPARK-21000 | 8da3f7041aafa71d7596b531625edb899970fec2#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.driver.webui.url | 2.0.0 | SPARK-13492 | a4a0addccffb7cd0ece7947d55ce2538afa54c97#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.driver.failoverTimeout | 2.3.0 | SPARK-21456 | c42ef953343073a50ef04c5ce848b574ff7f2238#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.network.name | 2.1.0 | SPARK-18232 | d89bfc92302424406847ac7a9cfca714e6b742fc#diff-ab5bf34f1951a8f7ea83c9456a6c3ab7 |  
    spark.mesos.network.labels | 2.3.0 | SPARK-21694 | ce0d3bb377766bdf4df7852272557ae846408877#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.driver.constraints | 2.2.1 | SPARK-19606 | f6ee3d90d5c299e67ae6e2d553c16c0d9759d4b5#diff-91e6e5f871160782dc50d4060d6faea3 |  
    spark.mesos.driver.frameworkId | 2.1.0 | SPARK-16809 | 62e62124419f3fa07b324f5e42feb2c5b4fde715#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    spark.executor.uri | 0.8.0 | None | 46eecd110a4017ea0c86cbb1010d0ccd6a5eb2ef#diff-a885e7df97790e9b59c21c63353e7476 |  
    spark.mesos.proxy.baseURL | 2.3.0 | SPARK-13041 | 663f30d14a0c9219e07697af1ab56e11a714d9a6#diff-0b9b4e122eb666155aa189a4321a6ca8 |  
    spark.mesos.coarse | 0.6.0 | None | 63051dd2bcc4bf09d413ff7cf89a37967edc33ba#diff-eaf125f56ce786d64dcef99cf446a751 |  
    spark.mesos.coarse.shutdownTimeout | 2.0.0 | SPARK-12330 | c756bda477f458ba4aad7fdb2026263507e0ad9b#diff-d425d35aa23c47a62fbb538554f2f2cf |  
    spark.mesos.maxDrivers | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.retainedDrivers | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.cluster.retry.wait.max | 1.4.0 | SPARK-5338 | 53befacced828bbac53c6e3a4976ec3f036bae9e#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.fetcherCache.enable | 2.1.0 | SPARK-15994 | e34b4e12673fb76c92f661d7c03527410857a0f8#diff-772ea7311566edb25f11a4c4f882179a |  
    spark.mesos.appJar.local.resolution.mode | 2.4.0 | SPARK-24326 | 22df953f6bb191858053eafbabaa5b3ebca29f56#diff-6e4d0a0445975f03f975fdc1e3d80e49 |  
    spark.mesos.rejectOfferDuration | 2.2.0 | SPARK-19702 | 2e30c0b9bcaa6f7757bd85d1f1ec392d5f916f83#diff-daf48dabbe58afaeed8787751750b01d |  
    spark.mesos.rejectOfferDurationForUnmetConstraints | 1.6.0 | SPARK-10471 | 74f50275e429e649212928a9f36552941b862edc#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    spark.mesos.rejectOfferDurationForReachedMaxCores | 2.0.0 | SPARK-13001 | 1e7d9bfb5a41f5c2479ab3b4d4081f00bf00bd31#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    spark.mesos.uris | 1.5.0 | SPARK-8798 | a2f805729b401c68b60bd690ad02533b8db57b58#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.executor.home | 1.1.1 | SPARK-3264 | 069ecfef02c4af69fc0d3755bd78be321b68b01d#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.mesosExecutor.cores | 1.4.0 | SPARK-6350 | 6fbeb82e13db7117d8f216e6148632490a4bc5be#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.extra.cores | 0.6.0 | None | 2d761e3353651049f6707c74bb5ffdd6e86f6f35#diff-37af8c6e3634f97410ade813a5172621 |  
    spark.mesos.executor.memoryOverhead | 1.1.1 | SPARK-3535 | 6f150978477830bbc14ba983786dd2bce12d1fe2#diff-6b498f5407d10e848acac4a1b182457c |  
    spark.mesos.executor.docker.image | 1.4.0 | SPARK-2691 | 8f50a07d2188ccc5315d979755188b1e5d5b5471#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.executor.docker.forcePullImage | 2.1.0 | SPARK-15271 | 978cd5f125eb5a410bad2e60bf8385b11cf1b978#diff-0dd025320c7ecda2ea310ed7172d7f5a |  
    spark.mesos.executor.docker.portmaps | 1.4.0 | SPARK-7373 | 226033cfffa2f37ebaf8bc2c653f094e91ef0c9b#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.executor.docker.parameters | 2.2.0 | SPARK-19740 | a888fed3099e84c2cf45e9419f684a3658ada19d#diff-4139e6605a8c7f242f65cde538770c99 |  
    spark.mesos.executor.docker.volumes | 1.4.0 | SPARK-7373 | 226033cfffa2f37ebaf8bc2c653f094e91ef0c9b#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.gpus.max | 2.1.0 | SPARK-14082 | 29f186bfdf929b1e8ffd8e33ee37b76d5dc5af53#diff-d427ee890b913c5a7056be21eb4f39d7 |  
    spark.mesos.task.labels | 2.2.0 | SPARK-20085 | c8fc1f3badf61bcfc4bd8eeeb61f73078ca068d1#diff-387c5d0c916278495fc28420571adf9e |  
    spark.mesos.constraints | 1.5.0 | SPARK-6707 | 1165b17d24cdf1dbebb2faca14308dfe5c2a652c#diff-e3a5e67b8de2069ce99801372e214b8e |  
    spark.mesos.containerizer | 2.1.0 | SPARK-16637 | 266b92faffb66af24d8ed2725beb80770a2d91f8#diff-0dd025320c7ecda2ea310ed7172d7f5a |  
    spark.mesos.role | 1.5.0 | SPARK-6284 | d86bbb4e286f16f77ba125452b07827684eafeed#diff-02a6d899f7a529eb7cfbb12182a110b0 |  
    The following appear in the document |   |   |   |  
    spark.mesos.driverEnv.[EnvironmentVariableName] | 2.1.0 | SPARK-16194 | 235cb256d06653bcde4c3ed6b081503a94996321#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    spark.mesos.dispatcher.driverDefault.[PropertyName] | 2.1.0 | SPARK-16927 and SPARK-16923 | eca58755fbbc11937b335ad953a3caff89b818e6#diff-b964c449b99c51f0a5fd77270b2951a4 |  
    
    ### Why are the changes needed?
    Supplemental configuration version information.
    
    ### Does this PR introduce any user-facing change?
    'No'.
    
    ### How was this patch tested?
    Existing unit tests.
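
    For orientation, a similar hedged sketch for a few of the Mesos properties documented in this
    change; the master URL, image and principal file are placeholders, while the property names
    come from running-on-mesos.md:

      import org.apache.spark.SparkConf

      object MesosConfSketch {
        def main(args: Array[String]): Unit = {
          // Placeholder values only -- adjust for a real Mesos cluster.
          val conf = new SparkConf()
            .setMaster("mesos://zk://zk1.example.com:2181/mesos")
            .set("spark.mesos.coarse", "true")
            .set("spark.mesos.executor.docker.image", "registry.example.com/spark-mesos:3.0.0")
            .set("spark.mesos.principal.file", "/etc/spark/mesos.principal")
          println(conf.toDebugString)
        }
      }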
    
    Closes #27863 from beliefer/add-version-to-mesos-config.
    
    Authored-by: beliefer <be...@163.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 docs/running-on-mesos.md                           | 47 ++++++++++++++++---
 .../org/apache/spark/deploy/mesos/config.scala     | 52 +++++++++++++++++++---
 2 files changed, 87 insertions(+), 12 deletions(-)

diff --git a/docs/running-on-mesos.md b/docs/running-on-mesos.md
index cf51620a..6f6ae1c 100644
--- a/docs/running-on-mesos.md
+++ b/docs/running-on-mesos.md
@@ -371,7 +371,7 @@ See the [configuration page](configuration.html) for information on Spark config
 #### Spark Properties
 
 <table class="table">
-<tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+<tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
 <tr>
   <td><code>spark.mesos.coarse</code></td>
   <td>true</td>
@@ -380,6 +380,7 @@ See the [configuration page](configuration.html) for information on Spark config
     If set to <code>false</code>, runs over Mesos cluster in "fine-grained" sharing mode, where one Mesos task is created per Spark task.
     Detailed information in <a href="running-on-mesos.html#mesos-run-modes">'Mesos Run Modes'</a>.
   </td>
+  <td>0.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.extra.cores</code></td>
@@ -391,6 +392,7 @@ See the [configuration page](configuration.html) for information on Spark config
     send it more tasks.  Use this to increase parallelism.  This
     setting is only used for Mesos coarse-grained mode.
   </td>
+  <td>0.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.mesosExecutor.cores</code></td>
@@ -401,6 +403,7 @@ See the [configuration page](configuration.html) for information on Spark config
     is being run, each Mesos executor will occupy the number of cores configured here.
     The value can be a floating point number.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.image</code></td>
@@ -411,6 +414,7 @@ See the [configuration page](configuration.html) for information on Spark config
     The installed path of Spark in the image can be specified with <code>spark.mesos.executor.home</code>;
     the installed path of the Mesos library can be specified with <code>spark.executorEnv.MESOS_NATIVE_JAVA_LIBRARY</code>.
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.forcePullImage</code></td>
@@ -419,6 +423,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Force Mesos agents to pull the image specified in <code>spark.mesos.executor.docker.image</code>.
     By default Mesos agents will not pull images they already have cached.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.parameters</code></td>
@@ -429,6 +434,7 @@ See the [configuration page](configuration.html) for information on Spark config
 
     <pre>key1=val1,key2=val2,key3=val3</pre>
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.docker.volumes</code></td>
@@ -440,6 +446,7 @@ See the [configuration page](configuration.html) for information on Spark config
 
     <pre>[host_path:]container_path[:ro|:rw]</pre>
   </td>
+  <td>1.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.task.labels</code></td>
@@ -450,6 +457,7 @@ See the [configuration page](configuration.html) for information on Spark config
     list more than one.  If your label includes a colon or comma, you
     can escape it with a backslash.  Ex. key:value,key2:a\:b.
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.home</code></td>
@@ -460,6 +468,7 @@ See the [configuration page](configuration.html) for information on Spark config
     them. Note that this is only relevant if a Spark binary package is not specified through
     <code>spark.executor.uri</code>.
   </td>
+  <td>1.1.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.executor.memoryOverhead</code></td>
@@ -469,6 +478,7 @@ See the [configuration page](configuration.html) for information on Spark config
     the overhead will be larger of either 384 or 10% of <code>spark.executor.memory</code>. If set,
     the final overhead will be this value.
   </td>
+  <td>1.1.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.uris</code></td>
@@ -478,6 +488,7 @@ See the [configuration page](configuration.html) for information on Spark config
     when driver or executor is launched by Mesos.  This applies to
     both coarse-grained and fine-grained mode.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.principal</code></td>
@@ -485,6 +496,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Set the principal with which Spark framework will use to authenticate with Mesos.  You can also specify this via the environment variable `SPARK_MESOS_PRINCIPAL`.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.principal.file</code></td>
@@ -492,6 +504,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Set the file containing the principal with which Spark framework will use to authenticate with Mesos.  Allows specifying the principal indirectly in more security conscious deployments.  The file must be readable by the user launching the job and be UTF-8 encoded plaintext.  You can also specify this via the environment variable `SPARK_MESOS_PRINCIPAL_FILE`.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.secret</code></td>
@@ -500,6 +513,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the secret with which Spark framework will use to authenticate with Mesos. Used, for example, when
     authenticating with the registry.  You can also specify this via the environment variable `SPARK_MESOS_SECRET`.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.secret.file</code></td>
@@ -508,6 +522,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the file containing the secret with which Spark framework will use to authenticate with Mesos. Used, for example, when
     authenticating with the registry.  Allows for specifying the secret indirectly in more security conscious deployments.  The file must be readable by the user launching the job and be UTF-8 encoded plaintext.  You can also specify this via the environment variable `SPARK_MESOS_SECRET_FILE`.
   </td>
+  <td>2.4.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.role</code></td>
@@ -516,6 +531,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the role of this Spark framework for Mesos. Roles are used in Mesos for reservations
     and resource weight sharing.
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.constraints</code></td>
@@ -532,6 +548,7 @@ See the [configuration page](configuration.html) for information on Spark config
       <li>In case there is no value present as a part of the constraint any offer with the corresponding attribute will be accepted (without value check).</li>
     </ul>
   </td>
+  <td>1.5.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.constraints</code></td>
@@ -540,6 +557,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Same as <code>spark.mesos.constraints</code> except applied to drivers when launched through the dispatcher. By default,
     all offers with sufficient resources will be accepted.
   </td>
+  <td>2.2.1</td>
 </tr>
 <tr>
   <td><code>spark.mesos.containerizer</code></td>
@@ -550,6 +568,7 @@ See the [configuration page](configuration.html) for information on Spark config
     containerizers for docker: the "docker" containerizer, and the preferred
     "mesos" containerizer.  Read more here: http://mesos.apache.org/documentation/latest/container-image/
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.webui.url</code></td>
@@ -558,6 +577,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the Spark Mesos driver webui_url for interacting with the framework.
     If unset it will point to Spark's internal web UI.
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.labels</code></td>
@@ -566,8 +586,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Mesos labels to add to the driver.  See <code>spark.mesos.task.labels</code>
     for formatting information.
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td>
     <code>spark.mesos.driver.secret.values</code>,
@@ -616,8 +636,8 @@ See the [configuration page](configuration.html) for information on Spark config
       <pre>spark.mesos.driver.secret.names=password1,password2</pre>
     </p>
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td>
     <code>spark.mesos.driver.secret.envkeys</code>,
@@ -670,8 +690,8 @@ See the [configuration page](configuration.html) for information on Spark config
       <pre>spark.mesos.driver.secret.filenames=pwdfile1,pwdfile2</pre>
     </p>
   </td>
+  <td>2.3.0</td>
 </tr>
-
 <tr>
   <td><code>spark.mesos.driverEnv.[EnvironmentVariableName]</code></td>
   <td><code>(none)</code></td>
@@ -681,6 +701,7 @@ See the [configuration page](configuration.html) for information on Spark config
     driver process. The user can specify multiple of these to set
     multiple environment variables.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.dispatcher.webui.url</code></td>
@@ -689,7 +710,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the Spark Mesos dispatcher webui_url for interacting with the framework.
     If unset it will point to Spark's internal web UI.
   </td>
-  </tr>
+  <td>2.0.0</td>
+</tr>
 <tr>
   <td><code>spark.mesos.dispatcher.driverDefault.[PropertyName]</code></td>
   <td><code>(none)</code></td>
@@ -699,7 +721,8 @@ See the [configuration page](configuration.html) for information on Spark config
     spark.mesos.dispatcher.driverProperty.spark.executor.memory=32g
     results in the executors for all drivers submitted in cluster mode
     to run in 32g containers.
-</td>
+  </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.dispatcher.historyServer.url</code></td>
@@ -709,6 +732,7 @@ See the [configuration page](configuration.html) for information on Spark config
     server</a>.  The dispatcher will then link each driver to its entry
     in the history server.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.gpus.max</code></td>
@@ -717,7 +741,8 @@ See the [configuration page](configuration.html) for information on Spark config
     Set the maximum number GPU resources to acquire for this job. Note that executors will still launch when no GPU resources are found
     since this configuration is just an upper limit and not a guaranteed amount.
   </td>
-  </tr>
+  <td>2.1.0</td>
+</tr>
 <tr>
   <td><code>spark.mesos.network.name</code></td>
   <td><code>(none)</code></td>
@@ -728,6 +753,7 @@ See the [configuration page](configuration.html) for information on Spark config
     <a href="http://mesos.apache.org/documentation/latest/cni/">the Mesos CNI docs</a>
     for more details.
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.network.labels</code></td>
@@ -742,6 +768,7 @@ See the [configuration page](configuration.html) for information on Spark config
     <a href="http://mesos.apache.org/documentation/latest/cni/#mesos-meta-data-to-cni-plugins">the Mesos CNI docs</a>
     for more details.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.fetcherCache.enable</code></td>
@@ -752,6 +779,7 @@ See the [configuration page](configuration.html) for information on Spark config
     href="http://mesos.apache.org/documentation/latest/fetcher/">Mesos
     Fetcher Cache</a>
   </td>
+  <td>2.1.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.driver.failoverTimeout</code></td>
@@ -763,6 +791,7 @@ See the [configuration page](configuration.html) for information on Spark config
     executors. The default value is zero, meaning no timeout: if the 
     driver disconnects, the master immediately tears down the framework.
   </td>
+  <td>2.3.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDuration</code></td>
@@ -772,6 +801,7 @@ See the [configuration page](configuration.html) for information on Spark config
     `spark.mesos.rejectOfferDurationForUnmetConstraints`,
     `spark.mesos.rejectOfferDurationForReachedMaxCores`
   </td>
+  <td>2.2.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDurationForUnmetConstraints</code></td>
@@ -779,6 +809,7 @@ See the [configuration page](configuration.html) for information on Spark config
   <td>
     Time to consider unused resources refused with unmet constraints
   </td>
+  <td>1.6.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.rejectOfferDurationForReachedMaxCores</code></td>
@@ -787,6 +818,7 @@ See the [configuration page](configuration.html) for information on Spark config
     Time to consider unused resources refused when maximum number of cores
     <code>spark.cores.max</code> is reached
   </td>
+  <td>2.0.0</td>
 </tr>
 <tr>
   <td><code>spark.mesos.appJar.local.resolution.mode</code></td>
@@ -799,6 +831,7 @@ See the [configuration page](configuration.html) for information on Spark config
     If the value is `container` then spark submit in the container will use the jar in the container's path:
     `/path/to/jar`.
   </td>
+  <td>2.4.0</td>
 </tr>
 </table>
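
For readers checking the table above: the documented properties are set like any other Spark
configuration, for instance on a SparkConf before the session is created. The sketch below is
illustrative only; the master URL, image name, and chosen values are hypothetical and are not
part of this patch.

    import org.apache.spark.SparkConf
    import org.apache.spark.sql.SparkSession

    // Hypothetical values, shown only to illustrate the properties documented in the table above.
    val conf = new SparkConf()
      .setAppName("mesos-config-example")
      .setMaster("mesos://zk://zk-host:2181/mesos")                     // illustrative master URL
      .set("spark.mesos.coarse", "true")                                // coarse-grained mode (default)
      .set("spark.mesos.executor.docker.image", "example/spark:3.0.0")  // hypothetical image name
      .set("spark.mesos.role", "spark")                                 // illustrative Mesos role

    val spark = SparkSession.builder().config(conf).getOrCreate()
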
 
diff --git a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
index 79a1137..e1c0d18 100644
--- a/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
+++ b/resource-managers/mesos/src/main/scala/org/apache/spark/deploy/mesos/config.scala
@@ -28,6 +28,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.names")
         .doc("A comma-separated list of secret reference names. Consult the Mesos Secret " +
           "protobuf for more information.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -35,6 +36,7 @@ package object config {
     private[spark] val SECRET_VALUES =
       ConfigBuilder(s"spark.mesos.$taskType.secret.values")
         .doc("A comma-separated list of secret values.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -43,6 +45,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.envkeys")
         .doc("A comma-separated list of the environment variables to contain the secrets." +
           "The environment variable will be set on the driver.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -51,6 +54,7 @@ package object config {
       ConfigBuilder(s"spark.mesos.$taskType.secret.filenames")
         .doc("A comma-separated list of file paths secret will be written to.  Consult the Mesos " +
           "Secret protobuf for more information.")
+        .version("2.3.0")
         .stringConf
         .toSequence
         .createOptional
@@ -59,6 +63,7 @@ package object config {
   private[spark] val CREDENTIAL_PRINCIPAL =
     ConfigBuilder("spark.mesos.principal")
       .doc("Name of the Kerberos principal to authenticate Spark to Mesos.")
+      .version("1.5.0")
       .stringConf
       .createOptional
 
@@ -66,18 +71,21 @@ package object config {
     ConfigBuilder("spark.mesos.principal.file")
       .doc("The path of file which contains the name of the Kerberos principal " +
         "to authenticate Spark to Mesos.")
+      .version("2.4.0")
       .stringConf
       .createOptional
 
   private[spark] val CREDENTIAL_SECRET =
     ConfigBuilder("spark.mesos.secret")
       .doc("The secret value to authenticate Spark to Mesos.")
+      .version("1.5.0")
       .stringConf
       .createOptional
 
   private[spark] val CREDENTIAL_SECRET_FILE =
     ConfigBuilder("spark.mesos.secret.file")
       .doc("The path of file which contains the secret value to authenticate Spark to Mesos.")
+      .version("2.4.0")
       .stringConf
       .createOptional
 
@@ -85,6 +93,7 @@ package object config {
 
   private[spark] val SHUFFLE_CLEANER_INTERVAL_S =
     ConfigBuilder("spark.shuffle.cleaner.interval")
+      .version("2.0.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("30s")
 
@@ -92,6 +101,7 @@ package object config {
     ConfigBuilder("spark.mesos.dispatcher.webui.url")
       .doc("Set the Spark Mesos dispatcher webui_url for interacting with the " +
         "framework. If unset it will point to Spark's internal web UI.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -99,6 +109,7 @@ package object config {
     ConfigBuilder("spark.mesos.dispatcher.historyServer.url")
       .doc("Set the URL of the history server. The dispatcher will then " +
         "link each driver to its entry in the history server.")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
@@ -107,6 +118,7 @@ package object config {
       .doc("Mesos labels to add to the driver.  Labels are free-form key-value pairs. Key-value " +
         "pairs should be separated by a colon, and commas used to list more than one." +
         "Ex. key:value,key2:value2")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
@@ -114,6 +126,7 @@ package object config {
     ConfigBuilder("spark.mesos.driver.webui.url")
       .doc("Set the Spark Mesos driver webui_url for interacting with the framework. " +
         "If unset it will point to Spark's internal web UI.")
+      .version("2.0.0")
       .stringConf
       .createOptional
 
@@ -125,6 +138,7 @@ package object config {
     ConfigBuilder("spark.mesos.driver.failoverTimeout")
       .doc("Amount of time in seconds that the master will wait to hear from the driver, " +
           "during a temporary disconnection, before tearing down all the executors.")
+      .version("2.3.0")
       .doubleConf
       .createWithDefault(0.0)
 
@@ -132,6 +146,7 @@ package object config {
     ConfigBuilder("spark.mesos.network.name")
       .doc("Attach containers to the given named network. If this job is launched " +
         "in cluster mode, also launch the driver in the given named network.")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
@@ -140,6 +155,7 @@ package object config {
       .doc("Network labels to pass to CNI plugins.  This is a comma-separated list " +
         "of key-value pairs, where each key-value pair has the format key:value. " +
         "Example: key1:val1,key2:val2")
+      .version("2.3.0")
       .stringConf
       .createOptional
 
@@ -147,19 +163,21 @@ package object config {
     ConfigBuilder("spark.mesos.driver.constraints")
       .doc("Attribute based constraints on mesos resource offers. Applied by the dispatcher " +
         "when launching drivers. Default is to accept all offers with sufficient resources.")
+      .version("2.2.1")
       .stringConf
       .createWithDefault("")
 
   private[spark] val DRIVER_FRAMEWORK_ID =
     ConfigBuilder("spark.mesos.driver.frameworkId")
+      .version("2.1.0")
       .stringConf
       .createOptional
 
   private[spark] val EXECUTOR_URI =
-    ConfigBuilder("spark.executor.uri").stringConf.createOptional
+    ConfigBuilder("spark.executor.uri").version("0.8.0").stringConf.createOptional
 
   private[spark] val PROXY_BASE_URL =
-    ConfigBuilder("spark.mesos.proxy.baseURL").stringConf.createOptional
+    ConfigBuilder("spark.mesos.proxy.baseURL").version("2.3.0").stringConf.createOptional
 
   private[spark] val COARSE_MODE =
     ConfigBuilder("spark.mesos.coarse")
@@ -167,22 +185,26 @@ package object config {
         "Spark acquires one long-lived Mesos task on each machine. If set to false, runs over " +
         "Mesos cluster in \"fine-grained\" sharing mode, where one Mesos task is created per " +
         "Spark task.")
-      .booleanConf.createWithDefault(true)
+      .version("0.6.0")
+      .booleanConf
+      .createWithDefault(true)
 
   private[spark] val COARSE_SHUTDOWN_TIMEOUT =
     ConfigBuilder("spark.mesos.coarse.shutdownTimeout")
+      .version("2.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(_ >= 0, s"spark.mesos.coarse.shutdownTimeout must be >= 0")
       .createWithDefaultString("10s")
 
   private[spark] val MAX_DRIVERS =
-    ConfigBuilder("spark.mesos.maxDrivers").intConf.createWithDefault(200)
+    ConfigBuilder("spark.mesos.maxDrivers").version("1.4.0").intConf.createWithDefault(200)
 
   private[spark] val RETAINED_DRIVERS =
-    ConfigBuilder("spark.mesos.retainedDrivers").intConf.createWithDefault(200)
+    ConfigBuilder("spark.mesos.retainedDrivers").version("1.4.0").intConf.createWithDefault(200)
 
   private[spark] val CLUSTER_RETRY_WAIT_MAX_SECONDS =
     ConfigBuilder("spark.mesos.cluster.retry.wait.max")
+      .version("1.4.0")
       .intConf
       .createWithDefault(60) // 1 minute
 
@@ -190,6 +212,7 @@ package object config {
     ConfigBuilder("spark.mesos.fetcherCache.enable")
       .doc("If set to true, all URIs (example: `spark.executor.uri`, `spark.mesos.uris`) will be " +
         "cached by the Mesos Fetcher Cache.")
+      .version("2.1.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -201,6 +224,7 @@ package object config {
         "resource from the host's file system. If the value is unknown it prints a warning msg " +
         "in the dispatcher logs and defaults to `host`. If the value is `container` then spark " +
         "submit in the container will use the jar in the container's path: `/path/to/jar`.")
+      .version("2.4.0")
       .stringConf
       .checkValues(Set("host", "container"))
       .createWithDefault("host")
@@ -210,12 +234,14 @@ package object config {
       .doc("Time to consider unused resources refused, serves as a fallback of " +
         "`spark.mesos.rejectOfferDurationForUnmetConstraints`, " +
         "`spark.mesos.rejectOfferDurationForReachedMaxCores`.")
+      .version("2.2.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("120s")
 
   private[spark] val REJECT_OFFER_DURATION_FOR_UNMET_CONSTRAINTS =
     ConfigBuilder("spark.mesos.rejectOfferDurationForUnmetConstraints")
       .doc("Time to consider unused resources refused with unmet constraints.")
+      .version("1.6.0")
       .timeConf(TimeUnit.SECONDS)
       .createOptional
 
@@ -223,6 +249,7 @@ package object config {
     ConfigBuilder("spark.mesos.rejectOfferDurationForReachedMaxCores")
       .doc("Time to consider unused resources refused when maximum number of cores " +
         "`spark.cores.max` is reached.")
+      .version("2.0.0")
       .timeConf(TimeUnit.SECONDS)
       .createOptional
 
@@ -231,6 +258,7 @@ package object config {
       .doc("A comma-separated list of URIs to be downloaded to the sandbox when driver or " +
         "executor is launched by Mesos. This applies to both coarse-grained and fine-grained " +
         "mode.")
+      .version("1.5.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
@@ -241,6 +269,7 @@ package object config {
         "By default, the executors will simply use the driver's Spark home directory, which may " +
         "not be visible to them. Note that this is only relevant if a Spark binary package is " +
         "not specified through `spark.executor.uri`.")
+      .version("1.1.1")
       .stringConf
       .createOptional
 
@@ -250,6 +279,7 @@ package object config {
         "include the cores used to run the Spark tasks. In other words, even if no Spark task " +
         "is being run, each Mesos executor will occupy the number of cores configured here. " +
         "The value can be a floating point number.")
+      .version("1.4.0")
       .doubleConf
       .createWithDefault(1.0)
 
@@ -259,6 +289,7 @@ package object config {
         "more cores allocated. It instead means that an executor will \"pretend\" it has more " +
         "cores, so that the driver will send it more tasks. Use this to increase parallelism. " +
         "This setting is only used for Mesos coarse-grained mode.")
+      .version("0.6.0")
       .intConf
       .createWithDefault(0)
 
@@ -267,6 +298,7 @@ package object config {
       .doc("The amount of additional memory, specified in MiB, to be allocated per executor. " +
         "By default, the overhead will be larger of either 384 or 10% of " +
         "`spark.executor.memory`. If set, the final overhead will be this value.")
+      .version("1.1.1")
       .intConf
       .createOptional
 
@@ -277,6 +309,7 @@ package object config {
         "The installed path of Spark in the image can be specified with " +
         "`spark.mesos.executor.home`; the installed path of the Mesos library can be specified " +
         "with `spark.executorEnv.MESOS_NATIVE_JAVA_LIBRARY`.")
+      .version("1.4.0")
       .stringConf
       .createOptional
 
@@ -285,11 +318,13 @@ package object config {
       .doc("Force Mesos agents to pull the image specified in " +
         "`spark.mesos.executor.docker.image`. By default Mesos agents will not pull images they " +
         "already have cached.")
+      .version("2.1.0")
       .booleanConf
       .createOptional
 
   private[spark] val EXECUTOR_DOCKER_PORT_MAPS =
     ConfigBuilder("spark.mesos.executor.docker.portmaps")
+      .version("1.4.0")
       .stringConf
       .toSequence
       .createOptional
@@ -299,6 +334,7 @@ package object config {
       .doc("Set the list of custom parameters which will be passed into the `docker run` " +
         "command when launching the Spark executor on Mesos using the docker containerizer. " +
         "The format of this property is a list of key/value pairs which pair looks key1=value1.")
+      .version("2.2.0")
       .stringConf
       .toSequence
       .createOptional
@@ -309,6 +345,7 @@ package object config {
         "using `spark.mesos.executor.docker.image`. The format of this property is a list of " +
         "mappings following the form passed to `docker run -v`. That is they take the form:  " +
         "`[host_path:]container_path[:ro|:rw]`")
+      .version("1.4.0")
       .stringConf
       .toSequence
       .createOptional
@@ -318,6 +355,7 @@ package object config {
       .doc("Set the maximum number GPU resources to acquire for this job. Note that executors " +
         "will still launch when no GPU resources are found since this configuration is just an " +
         "upper limit and not a guaranteed amount.")
+      .version("2.1.0")
       .intConf
       .createWithDefault(0)
 
@@ -327,6 +365,7 @@ package object config {
         "Key-value pairs should be separated by a colon, and commas used to list more than one. " +
         "If your label includes a colon or comma, you can escape it with a backslash. " +
         "Ex. key:value,key2:a\\:b.")
+      .version("2.2.0")
       .stringConf
       .createWithDefault("")
 
@@ -335,6 +374,7 @@ package object config {
       .doc("Attribute-based constraints on mesos resource offers. By default, all resource " +
         "offers will be accepted. This setting applies only to executors. Refer to Mesos " +
         "Attributes & Resources doc for more information on attributes.")
+      .version("1.5.0")
       .stringConf
       .createWithDefault("")
 
@@ -344,6 +384,7 @@ package object config {
         "Mesos supports two types of containerizers for docker: the \"docker\" containerizer, " +
         "and the preferred \"mesos\" containerizer. " +
         "Read more here: http://mesos.apache.org/documentation/latest/container-image/")
+      .version("2.1.0")
       .stringConf
       .checkValues(Set("docker", "mesos"))
       .createWithDefault("docker")
@@ -352,6 +393,7 @@ package object config {
     ConfigBuilder("spark.mesos.role")
       .doc("Set the role of this Spark framework for Mesos. Roles are used in Mesos for " +
         "reservations and resource weight sharing.")
+      .version("1.5.0")
       .stringConf
       .createOptional
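
The change to config.scala is mechanical: every ConfigBuilder entry gains a .version(...) call
between .doc(...) (where present) and the typed builder method. As a hedged illustration of the
resulting pattern, a hypothetical new entry (not one added by this commit) would be declared as:

    // Hypothetical entry, shown only to illustrate where .version() sits in the
    // builder chain after this patch; it is not part of the commit itself.
    private[spark] val EXAMPLE_SETTING =
      ConfigBuilder("spark.mesos.example.setting")
        .doc("Placeholder description; real entries document behavior as above.")
        .version("3.0.0")   // the Spark release that first shipped the setting
        .booleanConf
        .createWithDefault(false)

The same ordering holds for the optional and sequence-typed entries elsewhere in the file.
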
 

