Posted to commits@spark.apache.org by gu...@apache.org on 2020/04/07 11:30:30 UTC

[spark] branch branch-3.0 updated (796f9cf -> 26d8465)

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a change to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git.


    from 796f9cf  [SPARK-29311][SQL][FOLLOWUP] Add migration guide for extracting second from datetimes
     new 2a10e9f  Revert "[SPARK-31002][CORE][DOC][3.0] Add version information to the configuration of Core"
     new 26d8465  Revert "[SPARK-30841][SQL][DOC][3.0] Add version information to the configuration of SQL"

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../org/apache/spark/internal/config/package.scala | 340 ++-------------------
 .../org/apache/spark/sql/internal/SQLConf.scala    | 267 +---------------
 2 files changed, 34 insertions(+), 573 deletions(-)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org


[spark] 01/02: Revert "[SPARK-31002][CORE][DOC][3.0] Add version information to the configuration of Core"

Posted by gu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit 2a10e9fcb705a238573cc78bc5be8cd801989dbe
Author: HyukjinKwon <gu...@apache.org>
AuthorDate: Tue Apr 7 20:29:12 2020 +0900

    Revert "[SPARK-31002][CORE][DOC][3.0] Add version information to the configuration of Core"
    
    This reverts commit a5444db82ace1bfca32ec8d5b9f9823e91ac9511.
---
 .../org/apache/spark/internal/config/package.scala | 340 ++-------------------
 1 file changed, 28 insertions(+), 312 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index fe0e476..1308a46 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -45,7 +45,6 @@ package object config {
         "custom implementation. Spark will try each class specified until one of them " +
         "returns the resource information for that resource. It tries the discovery " +
         "script last if none of the plugins return information for that resource.")
-      .version("3.0.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
@@ -56,113 +55,89 @@ package object config {
       .doc("Path to a file containing the resources allocated to the driver. " +
         "The file should be formatted as a JSON array of ResourceAllocation objects. " +
         "Only used internally in standalone mode.")
-      .version("3.0.0")
       .stringConf
       .createOptional
 
   private[spark] val DRIVER_CLASS_PATH =
-    ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH)
-      .version("1.0.0")
-      .stringConf
-      .createOptional
+    ConfigBuilder(SparkLauncher.DRIVER_EXTRA_CLASSPATH).stringConf.createOptional
 
   private[spark] val DRIVER_JAVA_OPTIONS =
     ConfigBuilder(SparkLauncher.DRIVER_EXTRA_JAVA_OPTIONS)
       .withPrepended(SparkLauncher.DRIVER_DEFAULT_JAVA_OPTIONS)
-      .version("1.0.0")
       .stringConf
       .createOptional
 
   private[spark] val DRIVER_LIBRARY_PATH =
-    ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH)
-      .version("1.0.0")
-      .stringConf
-      .createOptional
+    ConfigBuilder(SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH).stringConf.createOptional
 
   private[spark] val DRIVER_USER_CLASS_PATH_FIRST =
-    ConfigBuilder("spark.driver.userClassPathFirst")
-      .version("1.3.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.driver.userClassPathFirst").booleanConf.createWithDefault(false)
 
   private[spark] val DRIVER_CORES = ConfigBuilder("spark.driver.cores")
     .doc("Number of cores to use for the driver process, only in cluster mode.")
-    .version("1.3.0")
     .intConf
     .createWithDefault(1)
 
   private[spark] val DRIVER_MEMORY = ConfigBuilder(SparkLauncher.DRIVER_MEMORY)
     .doc("Amount of memory to use for the driver process, in MiB unless otherwise specified.")
-    .version("1.1.1")
     .bytesConf(ByteUnit.MiB)
     .createWithDefaultString("1g")
 
   private[spark] val DRIVER_MEMORY_OVERHEAD = ConfigBuilder("spark.driver.memoryOverhead")
     .doc("The amount of non-heap memory to be allocated per driver in cluster mode, " +
       "in MiB unless otherwise specified.")
-    .version("2.3.0")
     .bytesConf(ByteUnit.MiB)
     .createOptional
 
   private[spark] val DRIVER_LOG_DFS_DIR =
-    ConfigBuilder("spark.driver.log.dfsDir").version("3.0.0").stringConf.createOptional
+    ConfigBuilder("spark.driver.log.dfsDir").stringConf.createOptional
 
   private[spark] val DRIVER_LOG_LAYOUT =
     ConfigBuilder("spark.driver.log.layout")
-      .version("3.0.0")
       .stringConf
       .createOptional
 
   private[spark] val DRIVER_LOG_PERSISTTODFS =
     ConfigBuilder("spark.driver.log.persistToDfs.enabled")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val DRIVER_LOG_ALLOW_EC =
     ConfigBuilder("spark.driver.log.allowErasureCoding")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EVENT_LOG_ENABLED = ConfigBuilder("spark.eventLog.enabled")
-    .version("1.0.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val EVENT_LOG_DIR = ConfigBuilder("spark.eventLog.dir")
-    .version("1.0.0")
     .stringConf
     .createWithDefault(EventLoggingListener.DEFAULT_LOG_DIR)
 
   private[spark] val EVENT_LOG_COMPRESS =
     ConfigBuilder("spark.eventLog.compress")
-      .version("1.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EVENT_LOG_BLOCK_UPDATES =
     ConfigBuilder("spark.eventLog.logBlockUpdates.enabled")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EVENT_LOG_ALLOW_EC =
     ConfigBuilder("spark.eventLog.erasureCoding.enabled")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EVENT_LOG_TESTING =
     ConfigBuilder("spark.eventLog.testing")
       .internal()
-      .version("1.0.1")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EVENT_LOG_OUTPUT_BUFFER_SIZE = ConfigBuilder("spark.eventLog.buffer.kb")
     .doc("Buffer size to use when writing to output streams, in KiB unless otherwise specified.")
-    .version("1.0.0")
     .bytesConf(ByteUnit.KiB)
     .createWithDefaultString("100k")
 
@@ -170,7 +145,6 @@ package object config {
     ConfigBuilder("spark.eventLog.logStageExecutorMetrics")
       .doc("Whether to write per-stage peaks of executor metrics (for each executor) " +
         "to the event log.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -179,7 +153,6 @@ package object config {
       .doc("Names of supported young generation garbage collector. A name usually is " +
         " the return of GarbageCollectorMXBean.getName. The built-in young generation garbage " +
         s"collectors are ${GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
-      .version("3.0.0")
       .stringConf
       .toSequence
       .createWithDefault(GarbageCollectionMetrics.YOUNG_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
@@ -189,28 +162,20 @@ package object config {
       .doc("Names of supported old generation garbage collector. A name usually is " +
         "the return of GarbageCollectorMXBean.getName. The built-in old generation garbage " +
         s"collectors are ${GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS}")
-      .version("3.0.0")
       .stringConf
       .toSequence
       .createWithDefault(GarbageCollectionMetrics.OLD_GENERATION_BUILTIN_GARBAGE_COLLECTORS)
 
   private[spark] val EVENT_LOG_OVERWRITE =
-    ConfigBuilder("spark.eventLog.overwrite")
-      .version("1.0.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.eventLog.overwrite").booleanConf.createWithDefault(false)
 
   private[spark] val EVENT_LOG_CALLSITE_LONG_FORM =
-    ConfigBuilder("spark.eventLog.longForm.enabled")
-      .version("2.4.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.eventLog.longForm.enabled").booleanConf.createWithDefault(false)
 
   private[spark] val EVENT_LOG_ENABLE_ROLLING =
     ConfigBuilder("spark.eventLog.rolling.enabled")
       .doc("Whether rolling over event log files is enabled. If set to true, it cuts down " +
         "each event log file to the configured size.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -218,46 +183,35 @@ package object config {
     ConfigBuilder("spark.eventLog.rolling.maxFileSize")
       .doc(s"When ${EVENT_LOG_ENABLE_ROLLING.key}=true, specifies the max size of event log file" +
         " to be rolled over.")
-      .version("3.0.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(_ >= ByteUnit.MiB.toBytes(10), "Max file size of event log should be " +
         "configured to be at least 10 MiB.")
       .createWithDefaultString("128m")
 
   private[spark] val EXECUTOR_ID =
-    ConfigBuilder("spark.executor.id").version("1.2.0").stringConf.createOptional
+    ConfigBuilder("spark.executor.id").stringConf.createOptional
 
   private[spark] val EXECUTOR_CLASS_PATH =
-    ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH)
-      .version("1.0.0")
-      .stringConf
-      .createOptional
+    ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH).stringConf.createOptional
 
   private[spark] val EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES =
     ConfigBuilder("spark.executor.heartbeat.dropZeroAccumulatorUpdates")
       .internal()
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val EXECUTOR_HEARTBEAT_INTERVAL =
     ConfigBuilder("spark.executor.heartbeatInterval")
-      .version("1.1.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("10s")
 
   private[spark] val EXECUTOR_HEARTBEAT_MAX_FAILURES =
-    ConfigBuilder("spark.executor.heartbeat.maxFailures")
-      .internal()
-      .version("1.6.2")
-      .intConf
-      .createWithDefault(60)
+    ConfigBuilder("spark.executor.heartbeat.maxFailures").internal().intConf.createWithDefault(60)
 
   private[spark] val EXECUTOR_PROCESS_TREE_METRICS_ENABLED =
     ConfigBuilder("spark.executor.processTreeMetrics.enabled")
       .doc("Whether to collect process tree metrics (from the /proc filesystem) when collecting " +
         "executor metrics.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -266,44 +220,33 @@ package object config {
       .doc("How often to collect executor metrics (in milliseconds). " +
         "If 0, the polling is done on executor heartbeats. " +
         "If positive, the polling is done at this interval.")
-      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("0")
 
   private[spark] val EXECUTOR_JAVA_OPTIONS =
     ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_JAVA_OPTIONS)
       .withPrepended(SparkLauncher.EXECUTOR_DEFAULT_JAVA_OPTIONS)
-      .version("1.0.0")
       .stringConf
       .createOptional
 
   private[spark] val EXECUTOR_LIBRARY_PATH =
-    ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH)
-      .version("1.0.0")
-      .stringConf
-      .createOptional
+    ConfigBuilder(SparkLauncher.EXECUTOR_EXTRA_LIBRARY_PATH).stringConf.createOptional
 
   private[spark] val EXECUTOR_USER_CLASS_PATH_FIRST =
-    ConfigBuilder("spark.executor.userClassPathFirst")
-      .version("1.3.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.executor.userClassPathFirst").booleanConf.createWithDefault(false)
 
   private[spark] val EXECUTOR_CORES = ConfigBuilder(SparkLauncher.EXECUTOR_CORES)
-    .version("1.0.0")
     .intConf
     .createWithDefault(1)
 
   private[spark] val EXECUTOR_MEMORY = ConfigBuilder(SparkLauncher.EXECUTOR_MEMORY)
     .doc("Amount of memory to use per executor process, in MiB unless otherwise specified.")
-    .version("0.7.0")
     .bytesConf(ByteUnit.MiB)
     .createWithDefaultString("1g")
 
   private[spark] val EXECUTOR_MEMORY_OVERHEAD = ConfigBuilder("spark.executor.memoryOverhead")
     .doc("The amount of non-heap memory to be allocated per executor in cluster mode, " +
       "in MiB unless otherwise specified.")
-    .version("2.3.0")
     .bytesConf(ByteUnit.MiB)
     .createOptional
 
@@ -313,14 +256,12 @@ package object config {
       "the cluster (not from each machine). If not set, the default will be " +
       "`spark.deploy.defaultCores` on Spark's standalone cluster manager, or infinite " +
       "(all available cores) on Mesos.")
-    .version("0.6.0")
     .intConf
     .createOptional
 
   private[spark] val MEMORY_OFFHEAP_ENABLED = ConfigBuilder("spark.memory.offHeap.enabled")
     .doc("If true, Spark will attempt to use off-heap memory for certain operations. " +
       "If off-heap memory use is enabled, then spark.memory.offHeap.size must be positive.")
-    .version("1.6.0")
     .withAlternative("spark.unsafe.offHeap")
     .booleanConf
     .createWithDefault(false)
@@ -331,7 +272,6 @@ package object config {
       "This setting has no impact on heap memory usage, so if your executors' total memory " +
       "consumption must fit within some hard limit then be sure to shrink your JVM heap size " +
       "accordingly. This must be set to a positive value when spark.memory.offHeap.enabled=true.")
-    .version("1.6.0")
     .bytesConf(ByteUnit.BYTE)
     .checkValue(_ >= 0, "The off-heap memory size must not be negative")
     .createWithDefault(0)
@@ -341,7 +281,6 @@ package object config {
       "size of the region set aside by spark.memory.fraction. The higher this is, the " +
       "less working memory may be available to execution and tasks may spill to disk more " +
       "often. Leaving this at the default value is recommended. ")
-    .version("1.6.0")
     .doubleConf
     .checkValue(v => v >= 0.0 && v < 1.0, "Storage fraction must be in [0,1)")
     .createWithDefault(0.5)
@@ -352,19 +291,16 @@ package object config {
       "The purpose of this config is to set aside memory for internal metadata, " +
       "user data structures, and imprecise size estimation in the case of sparse, " +
       "unusually large records. Leaving this at the default value is recommended.  ")
-    .version("1.6.0")
     .doubleConf
     .createWithDefault(0.6)
 
   private[spark] val STORAGE_SAFETY_FRACTION = ConfigBuilder("spark.storage.safetyFraction")
-    .version("1.1.0")
     .doubleConf
     .createWithDefault(0.9)
 
   private[spark] val STORAGE_UNROLL_MEMORY_THRESHOLD =
     ConfigBuilder("spark.storage.unrollMemoryThreshold")
       .doc("Initial memory to request before unrolling any block")
-      .version("1.1.0")
       .longConf
       .createWithDefault(1024 * 1024)
 
@@ -374,7 +310,6 @@ package object config {
         "Cached RDD block replicas lost due to executor failures are replenished " +
         "if there are any existing available replicas. This tries to " +
         "get the replication level of the block to the initial number")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -385,54 +320,41 @@ package object config {
         "This prevents Spark from memory mapping very small blocks. " +
         "In general, memory mapping has high overhead for blocks close to or below " +
         "the page size of the operating system.")
-      .version("0.9.2")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("2m")
 
   private[spark] val STORAGE_REPLICATION_POLICY =
     ConfigBuilder("spark.storage.replication.policy")
-      .version("2.1.0")
       .stringConf
       .createWithDefaultString(classOf[RandomBlockReplicationPolicy].getName)
 
   private[spark] val STORAGE_REPLICATION_TOPOLOGY_MAPPER =
     ConfigBuilder("spark.storage.replication.topologyMapper")
-      .version("2.1.0")
       .stringConf
       .createWithDefaultString(classOf[DefaultTopologyMapper].getName)
 
   private[spark] val STORAGE_CACHED_PEERS_TTL = ConfigBuilder("spark.storage.cachedPeersTtl")
-    .version("1.1.1")
-    .intConf
-    .createWithDefault(60 * 1000)
+    .intConf.createWithDefault(60 * 1000)
 
   private[spark] val STORAGE_MAX_REPLICATION_FAILURE =
     ConfigBuilder("spark.storage.maxReplicationFailures")
-      .version("1.1.1")
-      .intConf
-      .createWithDefault(1)
+      .intConf.createWithDefault(1)
 
   private[spark] val STORAGE_REPLICATION_TOPOLOGY_FILE =
-    ConfigBuilder("spark.storage.replication.topologyFile")
-      .version("2.1.0")
-      .stringConf
-      .createOptional
+    ConfigBuilder("spark.storage.replication.topologyFile").stringConf.createOptional
 
   private[spark] val STORAGE_EXCEPTION_PIN_LEAK =
     ConfigBuilder("spark.storage.exceptionOnPinLeak")
-      .version("1.6.2")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val STORAGE_BLOCKMANAGER_TIMEOUTINTERVAL =
     ConfigBuilder("spark.storage.blockManagerTimeoutIntervalMs")
-      .version("0.7.3")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("60s")
 
   private[spark] val STORAGE_BLOCKMANAGER_SLAVE_TIMEOUT =
     ConfigBuilder("spark.storage.blockManagerSlaveTimeoutMs")
-      .version("0.7.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString(Network.NETWORK_TIMEOUT.defaultValueString)
 
@@ -440,7 +362,6 @@ package object config {
     ConfigBuilder("spark.storage.cleanupFilesAfterExecutorExit")
       .doc("Whether or not cleanup the files not served by the external shuffle service " +
         "on executor exits.")
-      .version("2.4.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -448,7 +369,6 @@ package object config {
     ConfigBuilder("spark.diskStore.subDirectories")
       .doc("Number of subdirectories inside each path listed in spark.local.dir for " +
         "hashing Block files into.")
-      .version("0.6.0")
       .intConf
       .checkValue(_ > 0, "The number of subdirectories must be positive.")
       .createWithDefault(64)
@@ -457,101 +377,71 @@ package object config {
     ConfigBuilder("spark.block.failures.beforeLocationRefresh")
       .doc("Max number of failures before this block manager refreshes " +
         "the block locations from the driver.")
-      .version("2.0.0")
       .intConf
       .createWithDefault(5)
 
-  private[spark] val IS_PYTHON_APP =
-    ConfigBuilder("spark.yarn.isPython")
-      .internal()
-      .version("1.5.0")
-      .booleanConf
-      .createWithDefault(false)
+  private[spark] val IS_PYTHON_APP = ConfigBuilder("spark.yarn.isPython").internal()
+    .booleanConf.createWithDefault(false)
 
   private[spark] val CPUS_PER_TASK = ConfigBuilder("spark.task.cpus").intConf.createWithDefault(1)
 
   private[spark] val DYN_ALLOCATION_ENABLED =
-    ConfigBuilder("spark.dynamicAllocation.enabled")
-      .version("1.2.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.dynamicAllocation.enabled").booleanConf.createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_TESTING =
-    ConfigBuilder("spark.dynamicAllocation.testing")
-      .version("1.2.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.dynamicAllocation.testing").booleanConf.createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_MIN_EXECUTORS =
-    ConfigBuilder("spark.dynamicAllocation.minExecutors")
-      .version("1.2.0")
-      .intConf
-      .createWithDefault(0)
+    ConfigBuilder("spark.dynamicAllocation.minExecutors").intConf.createWithDefault(0)
 
   private[spark] val DYN_ALLOCATION_INITIAL_EXECUTORS =
     ConfigBuilder("spark.dynamicAllocation.initialExecutors")
-      .version("1.3.0")
       .fallbackConf(DYN_ALLOCATION_MIN_EXECUTORS)
 
   private[spark] val DYN_ALLOCATION_MAX_EXECUTORS =
-    ConfigBuilder("spark.dynamicAllocation.maxExecutors")
-      .version("1.2.0")
-      .intConf
-      .createWithDefault(Int.MaxValue)
+    ConfigBuilder("spark.dynamicAllocation.maxExecutors").intConf.createWithDefault(Int.MaxValue)
 
   private[spark] val DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO =
     ConfigBuilder("spark.dynamicAllocation.executorAllocationRatio")
-      .version("2.4.0")
-      .doubleConf
-      .createWithDefault(1.0)
+      .doubleConf.createWithDefault(1.0)
 
   private[spark] val DYN_ALLOCATION_CACHED_EXECUTOR_IDLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.cachedExecutorIdleTimeout")
-      .version("1.4.0")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(Integer.MAX_VALUE)
 
   private[spark] val DYN_ALLOCATION_EXECUTOR_IDLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.executorIdleTimeout")
-      .version("1.2.0")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(60)
 
   private[spark] val DYN_ALLOCATION_SHUFFLE_TRACKING =
     ConfigBuilder("spark.dynamicAllocation.shuffleTracking.enabled")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val DYN_ALLOCATION_SHUFFLE_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.shuffleTimeout")
-      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .checkValue(_ >= 0L, "Timeout must be >= 0.")
       .createWithDefault(Long.MaxValue)
 
   private[spark] val DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.schedulerBacklogTimeout")
-      .version("1.2.0")
       .timeConf(TimeUnit.SECONDS).createWithDefault(1)
 
   private[spark] val DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT =
     ConfigBuilder("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout")
-      .version("1.2.0")
       .fallbackConf(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
 
   private[spark] val LOCALITY_WAIT = ConfigBuilder("spark.locality.wait")
-    .version("0.5.0")
     .timeConf(TimeUnit.MILLISECONDS)
     .createWithDefaultString("3s")
 
   private[spark] val SHUFFLE_SERVICE_ENABLED =
-    ConfigBuilder("spark.shuffle.service.enabled")
-      .version("1.2.0")
-      .booleanConf
-      .createWithDefault(false)
+    ConfigBuilder("spark.shuffle.service.enabled").booleanConf.createWithDefault(false)
 
   private[spark] val SHUFFLE_SERVICE_FETCH_RDD_ENABLED =
     ConfigBuilder(Constants.SHUFFLE_SERVICE_FETCH_RDD_ENABLED)
@@ -559,7 +449,6 @@ package object config {
         "In case of dynamic allocation if this feature is enabled executors having only disk " +
         "persisted blocks are considered idle after " +
         "'spark.dynamicAllocation.executorIdleTimeout' and will be released accordingly.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -567,25 +456,21 @@ package object config {
     ConfigBuilder("spark.shuffle.service.db.enabled")
       .doc("Whether to use db in ExternalShuffleService. Note that this only affects " +
         "standalone mode.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val SHUFFLE_SERVICE_PORT =
-    ConfigBuilder("spark.shuffle.service.port").version("1.2.0").intConf.createWithDefault(7337)
+    ConfigBuilder("spark.shuffle.service.port").intConf.createWithDefault(7337)
 
   private[spark] val KEYTAB = ConfigBuilder("spark.kerberos.keytab")
     .doc("Location of user's keytab.")
-    .version("3.0.0")
     .stringConf.createOptional
 
   private[spark] val PRINCIPAL = ConfigBuilder("spark.kerberos.principal")
     .doc("Name of the Kerberos principal.")
-    .version("3.0.0")
     .stringConf.createOptional
 
   private[spark] val KERBEROS_RELOGIN_PERIOD = ConfigBuilder("spark.kerberos.relogin.period")
-    .version("3.0.0")
     .timeConf(TimeUnit.SECONDS)
     .createWithDefaultString("1m")
 
@@ -595,7 +480,6 @@ package object config {
         "Which credentials to use when renewing delegation tokens for executors. Can be either " +
         "'keytab', the default, which requires a keytab to be provided, or 'ccache', which uses " +
         "the local credentials cache.")
-      .version("3.0.0")
       .stringConf
       .checkValues(Set("keytab", "ccache"))
       .createWithDefault("keytab")
@@ -604,124 +488,104 @@ package object config {
     ConfigBuilder("spark.kerberos.access.hadoopFileSystems")
     .doc("Extra Hadoop filesystem URLs for which to request delegation tokens. The filesystem " +
       "that hosts fs.defaultFS does not need to be listed here.")
-    .version("3.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val EXECUTOR_INSTANCES = ConfigBuilder("spark.executor.instances")
-    .version("1.0.0")
     .intConf
     .createOptional
 
   private[spark] val PY_FILES = ConfigBuilder("spark.yarn.dist.pyFiles")
     .internal()
-    .version("2.2.1")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val TASK_MAX_DIRECT_RESULT_SIZE =
     ConfigBuilder("spark.task.maxDirectResultSize")
-      .version("2.0.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(1L << 20)
 
   private[spark] val TASK_MAX_FAILURES =
     ConfigBuilder("spark.task.maxFailures")
-      .version("0.8.0")
       .intConf
       .createWithDefault(4)
 
   private[spark] val TASK_REAPER_ENABLED =
     ConfigBuilder("spark.task.reaper.enabled")
-      .version("2.0.3")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val TASK_REAPER_KILL_TIMEOUT =
     ConfigBuilder("spark.task.reaper.killTimeout")
-      .version("2.0.3")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(-1)
 
   private[spark] val TASK_REAPER_POLLING_INTERVAL =
     ConfigBuilder("spark.task.reaper.pollingInterval")
-      .version("2.0.3")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("10s")
 
   private[spark] val TASK_REAPER_THREAD_DUMP =
     ConfigBuilder("spark.task.reaper.threadDump")
-      .version("2.0.3")
       .booleanConf
       .createWithDefault(true)
 
   // Blacklist confs
   private[spark] val BLACKLIST_ENABLED =
     ConfigBuilder("spark.blacklist.enabled")
-      .version("2.1.0")
       .booleanConf
       .createOptional
 
   private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
     ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerExecutor")
-      .version("2.1.0")
       .intConf
       .createWithDefault(1)
 
   private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
     ConfigBuilder("spark.blacklist.task.maxTaskAttemptsPerNode")
-      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILURES_PER_EXEC =
     ConfigBuilder("spark.blacklist.application.maxFailedTasksPerExecutor")
-      .version("2.2.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
     ConfigBuilder("spark.blacklist.stage.maxFailedTasksPerExecutor")
-      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILED_EXEC_PER_NODE =
     ConfigBuilder("spark.blacklist.application.maxFailedExecutorsPerNode")
-      .version("2.2.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
     ConfigBuilder("spark.blacklist.stage.maxFailedExecutorsPerNode")
-      .version("2.1.0")
       .intConf
       .createWithDefault(2)
 
   private[spark] val BLACKLIST_TIMEOUT_CONF =
     ConfigBuilder("spark.blacklist.timeout")
-      .version("2.1.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val BLACKLIST_KILL_ENABLED =
     ConfigBuilder("spark.blacklist.killBlacklistedExecutors")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val BLACKLIST_LEGACY_TIMEOUT_CONF =
     ConfigBuilder("spark.scheduler.executorTaskBlacklistTime")
       .internal()
-      .version("1.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val BLACKLIST_FETCH_FAILURE_ENABLED =
     ConfigBuilder("spark.blacklist.application.fetchFailure.enabled")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
   // End blacklist confs
@@ -731,7 +595,6 @@ package object config {
       .doc("Whether to un-register all the outputs on the host in condition that we receive " +
         " a FetchFailure. This is set default to false, which means, we only un-register the " +
         " outputs related to the exact executor(instead of the host) on a FetchFailure.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -741,7 +604,6 @@ package object config {
         "an event queue using capacity specified by `spark.scheduler.listenerbus" +
         ".eventqueue.queueName.capacity` first. If it's not configured, Spark will " +
         "use the default capacity specified by this config.")
-      .version("2.3.0")
       .intConf
       .checkValue(_ > 0, "The capacity of listener bus event queue must be positive")
       .createWithDefault(10000)
@@ -749,7 +611,6 @@ package object config {
   private[spark] val LISTENER_BUS_METRICS_MAX_LISTENER_CLASSES_TIMED =
     ConfigBuilder("spark.scheduler.listenerbus.metrics.maxListenerClassesTimed")
       .internal()
-      .version("2.3.0")
       .intConf
       .createWithDefault(128)
 
@@ -759,7 +620,6 @@ package object config {
       .doc("When enabled, log the event that takes too much time to process. This helps us " +
         "discover the event types that cause performance bottlenecks. The time threshold is " +
         "controlled by spark.scheduler.listenerbus.logSlowEvent.threshold.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -768,65 +628,52 @@ package object config {
       .internal()
       .doc("The time threshold of whether a event is considered to be taking too much time to " +
         s"process. Log the event if ${LISTENER_BUS_LOG_SLOW_EVENT_ENABLED.key} is true.")
-      .version("3.0.0")
       .timeConf(TimeUnit.NANOSECONDS)
       .createWithDefaultString("1s")
 
   // This property sets the root namespace for metrics reporting
   private[spark] val METRICS_NAMESPACE = ConfigBuilder("spark.metrics.namespace")
-    .version("2.1.0")
     .stringConf
     .createOptional
 
   private[spark] val METRICS_CONF = ConfigBuilder("spark.metrics.conf")
-    .version("0.8.0")
     .stringConf
     .createOptional
 
   private[spark] val METRICS_EXECUTORMETRICS_SOURCE_ENABLED =
     ConfigBuilder("spark.metrics.executorMetricsSource.enabled")
       .doc("Whether to register the ExecutorMetrics source with the metrics system.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val METRICS_STATIC_SOURCES_ENABLED =
     ConfigBuilder("spark.metrics.staticSources.enabled")
       .doc("Whether to register static sources with the metrics system.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val PYSPARK_DRIVER_PYTHON = ConfigBuilder("spark.pyspark.driver.python")
-    .version("2.1.0")
     .stringConf
     .createOptional
 
   private[spark] val PYSPARK_PYTHON = ConfigBuilder("spark.pyspark.python")
-    .version("2.1.0")
     .stringConf
     .createOptional
 
   // To limit how many applications are shown in the History Server summary ui
   private[spark] val HISTORY_UI_MAX_APPS =
-    ConfigBuilder("spark.history.ui.maxApplications")
-      .version("2.0.1")
-      .intConf
-      .createWithDefault(Integer.MAX_VALUE)
+    ConfigBuilder("spark.history.ui.maxApplications").intConf.createWithDefault(Integer.MAX_VALUE)
 
   private[spark] val IO_ENCRYPTION_ENABLED = ConfigBuilder("spark.io.encryption.enabled")
-    .version("2.1.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val IO_ENCRYPTION_KEYGEN_ALGORITHM =
     ConfigBuilder("spark.io.encryption.keygen.algorithm")
-      .version("2.1.0")
       .stringConf
       .createWithDefault("HmacSHA1")
 
   private[spark] val IO_ENCRYPTION_KEY_SIZE_BITS = ConfigBuilder("spark.io.encryption.keySizeBits")
-    .version("2.1.0")
     .intConf
     .checkValues(Set(128, 192, 256))
     .createWithDefault(128)
@@ -834,68 +681,57 @@ package object config {
   private[spark] val IO_CRYPTO_CIPHER_TRANSFORMATION =
     ConfigBuilder("spark.io.crypto.cipher.transformation")
       .internal()
-      .version("2.1.0")
       .stringConf
       .createWithDefaultString("AES/CTR/NoPadding")
 
   private[spark] val DRIVER_HOST_ADDRESS = ConfigBuilder("spark.driver.host")
     .doc("Address of driver endpoints.")
-    .version("0.7.0")
     .stringConf
     .createWithDefault(Utils.localCanonicalHostName())
 
   private[spark] val DRIVER_PORT = ConfigBuilder("spark.driver.port")
     .doc("Port of driver endpoints.")
-    .version("0.7.0")
     .intConf
     .createWithDefault(0)
 
   private[spark] val DRIVER_SUPERVISE = ConfigBuilder("spark.driver.supervise")
     .doc("If true, restarts the driver automatically if it fails with a non-zero exit status. " +
       "Only has effect in Spark standalone mode or Mesos cluster deploy mode.")
-    .version("1.3.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val DRIVER_BIND_ADDRESS = ConfigBuilder("spark.driver.bindAddress")
     .doc("Address where to bind network listen sockets on the driver.")
-    .version("2.1.0")
     .fallbackConf(DRIVER_HOST_ADDRESS)
 
   private[spark] val BLOCK_MANAGER_PORT = ConfigBuilder("spark.blockManager.port")
     .doc("Port to use for the block manager when a more specific setting is not provided.")
-    .version("1.1.0")
     .intConf
     .createWithDefault(0)
 
   private[spark] val DRIVER_BLOCK_MANAGER_PORT = ConfigBuilder("spark.driver.blockManager.port")
     .doc("Port to use for the block manager on the driver.")
-    .version("2.1.0")
     .fallbackConf(BLOCK_MANAGER_PORT)
 
   private[spark] val IGNORE_CORRUPT_FILES = ConfigBuilder("spark.files.ignoreCorruptFiles")
     .doc("Whether to ignore corrupt files. If true, the Spark jobs will continue to run when " +
       "encountering corrupted or non-existing files and contents that have been read will still " +
       "be returned.")
-    .version("2.1.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val IGNORE_MISSING_FILES = ConfigBuilder("spark.files.ignoreMissingFiles")
     .doc("Whether to ignore missing files. If true, the Spark jobs will continue to run when " +
       "encountering missing files and the contents that have been read will still be returned.")
-    .version("2.4.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val APP_CALLER_CONTEXT = ConfigBuilder("spark.log.callerContext")
-    .version("2.2.0")
     .stringConf
     .createOptional
 
   private[spark] val FILES_MAX_PARTITION_BYTES = ConfigBuilder("spark.files.maxPartitionBytes")
     .doc("The maximum number of bytes to pack into a single partition when reading files.")
-    .version("2.1.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefault(128 * 1024 * 1024)
 
@@ -904,7 +740,6 @@ package object config {
       " the same time. This is used when putting multiple files into a partition. It's better to" +
       " over estimate, then the partitions with small files will be faster than partitions with" +
       " bigger files.")
-    .version("2.1.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefault(4 * 1024 * 1024)
 
@@ -912,7 +747,6 @@ package object config {
     ConfigBuilder("spark.hadoopRDD.ignoreEmptySplits")
       .internal()
       .doc("When true, HadoopRDD/NewHadoopRDD will not create partitions for empty input splits.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -922,7 +756,6 @@ package object config {
         "driver and executor environments contain sensitive information. When this regex matches " +
         "a property key or value, the value is redacted from the environment UI and various logs " +
         "like YARN and event logs.")
-      .version("2.1.2")
       .regexConf
       .createWithDefault("(?i)secret|password|token".r)
 
@@ -931,31 +764,26 @@ package object config {
       .doc("Regex to decide which parts of strings produced by Spark contain sensitive " +
         "information. When this regex matches a string part, that string part is replaced by a " +
         "dummy value. This is currently used to redact the output of SQL explain commands.")
-      .version("2.2.0")
       .regexConf
       .createOptional
 
   private[spark] val AUTH_SECRET =
     ConfigBuilder("spark.authenticate.secret")
-      .version("1.0.0")
       .stringConf
       .createOptional
 
   private[spark] val AUTH_SECRET_BIT_LENGTH =
     ConfigBuilder("spark.authenticate.secretBitLength")
-      .version("1.6.0")
       .intConf
       .createWithDefault(256)
 
   private[spark] val NETWORK_AUTH_ENABLED =
     ConfigBuilder("spark.authenticate")
-      .version("1.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val SASL_ENCRYPTION_ENABLED =
     ConfigBuilder("spark.authenticate.enableSaslEncryption")
-      .version("1.4.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -965,7 +793,6 @@ package object config {
         "loaded from this path on both the driver and the executors if overrides are not set for " +
         "either entity (see below). File-based secret keys are only allowed when using " +
         "Kubernetes.")
-      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -978,7 +805,6 @@ package object config {
         "be specified for the executors. The fallback configuration allows the same path to be " +
         "used for both the driver and the executors when running in cluster mode. File-based " +
         "secret keys are only allowed when using Kubernetes.")
-      .version("3.0.0")
       .fallbackConf(AUTH_SECRET_FILE)
 
   private[spark] val AUTH_SECRET_FILE_EXECUTOR =
@@ -990,14 +816,12 @@ package object config {
         "specified for the executors. The fallback configuration allows the same path to be " +
         "used for both the driver and the executors when running in cluster mode. File-based " +
         "secret keys are only allowed when using Kubernetes.")
-      .version("3.0.0")
       .fallbackConf(AUTH_SECRET_FILE)
 
   private[spark] val BUFFER_WRITE_CHUNK_SIZE =
     ConfigBuilder("spark.buffer.write.chunkSize")
       .internal()
       .doc("The chunk size in bytes during writing out the bytes of ChunkedByteBuffer.")
-      .version("2.3.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(_ <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
         "The chunk size during writing out the bytes of ChunkedByteBuffer should" +
@@ -1008,7 +832,6 @@ package object config {
     ConfigBuilder("spark.checkpoint.compress")
       .doc("Whether to compress RDD checkpoints. Generally a good idea. Compression will use " +
         "spark.io.compression.codec.")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1019,7 +842,6 @@ package object config {
         "Caching preferred locations can relieve query loading to DFS and save the query " +
         "time. The drawback is that the cached locations can be possibly outdated and " +
         "lose data locality. If this config is not specified, it will not cache.")
-      .version("3.0.0")
       .timeConf(TimeUnit.MINUTES)
       .checkValue(_ > 0, "The expire time for caching preferred locations cannot be non-positive.")
       .createOptional
@@ -1029,14 +851,12 @@ package object config {
       .doc("Threshold in bytes above which the size of shuffle blocks in " +
         "HighlyCompressedMapStatus is accurately recorded. This helps to prevent OOM " +
         "by avoiding underestimating shuffle block size when fetch shuffle blocks.")
-      .version("2.2.1")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(100 * 1024 * 1024)
 
   private[spark] val SHUFFLE_REGISTRATION_TIMEOUT =
     ConfigBuilder("spark.shuffle.registration.timeout")
       .doc("Timeout in milliseconds for registration to the external shuffle service.")
-      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(5000)
 
@@ -1044,7 +864,6 @@ package object config {
     ConfigBuilder("spark.shuffle.registration.maxAttempts")
       .doc("When we fail to register to the external shuffle service, we will " +
         "retry for maxAttempts times.")
-      .version("2.3.0")
       .intConf
       .createWithDefault(3)
 
@@ -1055,7 +874,6 @@ package object config {
         "address in a single fetch or simultaneously, this could crash the serving executor or " +
         "Node Manager. This is especially useful to reduce the load on the Node Manager when " +
         "external shuffle is enabled. You can mitigate the issue by setting it to a lower value.")
-      .version("2.2.1")
       .intConf
       .checkValue(_ > 0, "The max no. of blocks in flight cannot be non-positive.")
       .createWithDefault(Int.MaxValue)
@@ -1067,7 +885,6 @@ package object config {
         "configuration will affect both shuffle fetch and block manager remote block fetch. " +
         "For users who enabled external shuffle service, this feature can only work when " +
         "external shuffle service is at least 2.3.0.")
-      .version("3.0.0")
       .bytesConf(ByteUnit.BYTE)
       // fetch-to-mem is guaranteed to fail if the message is bigger than 2 GB, so we might
       // as well use fetch-to-disk in that case.  The message includes some metadata in addition
@@ -1083,14 +900,12 @@ package object config {
       .doc("Enable tracking of updatedBlockStatuses in the TaskMetrics. Off by default since " +
         "tracking the block statuses can use a lot of memory and its not used anywhere within " +
         "spark.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val SHUFFLE_IO_PLUGIN_CLASS =
     ConfigBuilder("spark.shuffle.sort.io.plugin.class")
       .doc("Name of the class to use for shuffle IO.")
-      .version("3.0.0")
       .stringConf
       .createWithDefault(classOf[LocalDiskShuffleDataIO].getName)
 
@@ -1099,7 +914,6 @@ package object config {
       .doc("Size of the in-memory buffer for each shuffle file output stream, in KiB unless " +
         "otherwise specified. These buffers reduce the number of disk seeks and system calls " +
         "made in creating intermediate shuffle files.")
-      .version("1.4.0")
       .bytesConf(ByteUnit.KiB)
       .checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
         s"The file buffer size must be positive and less than or equal to" +
@@ -1110,7 +924,6 @@ package object config {
     ConfigBuilder("spark.shuffle.unsafe.file.output.buffer")
       .doc("The file system for this buffer size after each partition " +
         "is written in unsafe shuffle writer. In KiB unless otherwise specified.")
-      .version("2.3.0")
       .bytesConf(ByteUnit.KiB)
       .checkValue(v => v > 0 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH / 1024,
         s"The buffer size must be positive and less than or equal to" +
@@ -1120,7 +933,6 @@ package object config {
   private[spark] val SHUFFLE_DISK_WRITE_BUFFER_SIZE =
     ConfigBuilder("spark.shuffle.spill.diskWriteBufferSize")
       .doc("The buffer size, in bytes, to use when writing the sorted records to an on-disk file.")
-      .version("2.3.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(v => v > 12 && v <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH,
         s"The buffer size must be greater than 12 and less than or equal to " +
@@ -1132,7 +944,6 @@ package object config {
       .internal()
       .doc("The memory check period is used to determine how often we should check whether "
         + "there is a need to request more memory when we try to unroll the given block in memory.")
-      .version("2.3.0")
       .longConf
       .createWithDefault(16)
 
@@ -1140,7 +951,6 @@ package object config {
     ConfigBuilder("spark.storage.unrollMemoryGrowthFactor")
       .internal()
       .doc("Memory to request as a multiple of the size that used to unroll the block.")
-      .version("2.3.0")
       .doubleConf
       .createWithDefault(1.5)
 
@@ -1151,14 +961,12 @@ package object config {
         "where the YARN service does not support schemes that are supported by Spark, like http, " +
         "https and ftp, or jars required to be in the local YARN client's classpath. Wildcard " +
         "'*' is denoted to download resources for all the schemes.")
-      .version("2.3.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
 
   private[spark] val EXTRA_LISTENERS = ConfigBuilder("spark.extraListeners")
     .doc("Class names of listeners to add to SparkContext during initialization.")
-    .version("1.3.0")
     .stringConf
     .toSequence
     .createOptional
@@ -1170,7 +978,6 @@ package object config {
         "By default it's Integer.MAX_VALUE, which means we never force the sorter to spill, " +
         "until we reach some limitations, like the max page size limitation for the pointer " +
         "array in the sorter.")
-      .version("1.6.0")
       .intConf
       .createWithDefault(Integer.MAX_VALUE)
 
@@ -1180,35 +987,30 @@ package object config {
       .doc("Multi-thread is used when the number of mappers * shuffle partitions is greater than " +
         "or equal to this threshold. Note that the actual parallelism is calculated by number of " +
         "mappers * shuffle partitions / this threshold + 1, so this threshold should be positive.")
-      .version("2.3.0")
       .intConf
       .checkValue(v => v > 0, "The threshold should be positive.")
       .createWithDefault(10000000)
 
   private[spark] val MAX_RESULT_SIZE = ConfigBuilder("spark.driver.maxResultSize")
     .doc("Size limit for results.")
-    .version("1.2.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefaultString("1g")
 
   private[spark] val CREDENTIALS_RENEWAL_INTERVAL_RATIO =
     ConfigBuilder("spark.security.credentials.renewalRatio")
       .doc("Ratio of the credential's expiration time when Spark should fetch new credentials.")
-      .version("2.4.0")
       .doubleConf
       .createWithDefault(0.75d)
 
   private[spark] val CREDENTIALS_RENEWAL_RETRY_WAIT =
     ConfigBuilder("spark.security.credentials.retryWait")
       .doc("How long to wait before retrying to fetch new credentials after a failure.")
-      .version("2.4.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("1h")
 
   private[spark] val SHUFFLE_SORT_INIT_BUFFER_SIZE =
     ConfigBuilder("spark.shuffle.sort.initialBufferSize")
       .internal()
-      .version("2.1.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(v => v > 0 && v <= Int.MaxValue,
         s"The buffer size must be greater than 0 and less than or equal to ${Int.MaxValue}.")
@@ -1218,7 +1020,6 @@ package object config {
     ConfigBuilder("spark.shuffle.compress")
       .doc("Whether to compress shuffle output. Compression will use " +
         "spark.io.compression.codec.")
-      .version("0.6.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1226,7 +1027,6 @@ package object config {
     ConfigBuilder("spark.shuffle.spill.compress")
       .doc("Whether to compress data spilled during shuffles. Compression will use " +
         "spark.io.compression.codec.")
-      .version("0.9.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1236,7 +1036,6 @@ package object config {
       .doc("The codec used to compress MapStatus, which is generated by ShuffleMapTask. " +
         "By default, Spark provides four codecs: lz4, lzf, snappy, and zstd. You can also " +
         "use fully qualified class names to specify the codec.")
-      .version("3.0.0")
       .stringConf
       .createWithDefault("zstd")
 
@@ -1245,7 +1044,6 @@ package object config {
       .internal()
       .doc("Initial threshold for the size of a collection before we start tracking its " +
         "memory usage.")
-      .version("1.1.1")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(5 * 1024 * 1024)
 
@@ -1253,7 +1051,6 @@ package object config {
     ConfigBuilder("spark.shuffle.spill.batchSize")
       .internal()
       .doc("Size of object batches when reading/writing from serializers.")
-      .version("0.9.0")
       .longConf
       .createWithDefault(10000)
 
@@ -1261,40 +1058,34 @@ package object config {
     ConfigBuilder("spark.shuffle.sort.bypassMergeThreshold")
       .doc("In the sort-based shuffle manager, avoid merge-sorting data if there is no " +
         "map-side aggregation and there are at most this many reduce partitions")
-      .version("1.1.1")
       .intConf
       .createWithDefault(200)
 
   private[spark] val SHUFFLE_MANAGER =
     ConfigBuilder("spark.shuffle.manager")
-      .version("1.1.0")
       .stringConf
       .createWithDefault("sort")
 
   private[spark] val SHUFFLE_REDUCE_LOCALITY_ENABLE =
     ConfigBuilder("spark.shuffle.reduceLocality.enabled")
       .doc("Whether to compute locality preferences for reduce tasks")
-      .version("1.5.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val SHUFFLE_MAPOUTPUT_MIN_SIZE_FOR_BROADCAST =
     ConfigBuilder("spark.shuffle.mapOutput.minSizeForBroadcast")
       .doc("The size at which we use Broadcast to send the map output statuses to the executors.")
-      .version("2.0.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("512k")
 
   private[spark] val SHUFFLE_MAPOUTPUT_DISPATCHER_NUM_THREADS =
     ConfigBuilder("spark.shuffle.mapOutput.dispatcher.numThreads")
-      .version("2.0.0")
       .intConf
       .createWithDefault(8)
 
   private[spark] val SHUFFLE_DETECT_CORRUPT =
     ConfigBuilder("spark.shuffle.detectCorrupt")
       .doc("Whether to detect any corruption in fetched blocks.")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1304,21 +1095,18 @@ package object config {
         "by using extra memory to detect early corruption. Any IOException thrown will cause " +
         "the task to be retried once and if it fails again with same exception, then " +
         "FetchFailedException will be thrown to retry previous stage")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val SHUFFLE_SYNC =
     ConfigBuilder("spark.shuffle.sync")
       .doc("Whether to force outstanding writes to disk.")
-      .version("0.8.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val SHUFFLE_UNSAFE_FAST_MERGE_ENABLE =
     ConfigBuilder("spark.shuffle.unsafe.fastMergeEnabled")
       .doc("Whether to perform a fast spill merge.")
-      .version("1.4.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1326,7 +1114,6 @@ package object config {
     ConfigBuilder("spark.shuffle.sort.useRadixSort")
       .doc("Whether to use radix sort for sorting in-memory partition ids. Radix sort is much " +
         "faster, but requires additional memory to be reserved memory as pointers are added.")
-      .version("2.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1334,7 +1121,6 @@ package object config {
     ConfigBuilder("spark.shuffle.minNumPartitionsToHighlyCompress")
       .internal()
       .doc("Number of partitions to determine if MapStatus should use HighlyCompressedMapStatus")
-      .version("2.4.0")
       .intConf
       .checkValue(v => v > 0, "The value should be a positive integer.")
       .createWithDefault(2000)
@@ -1344,7 +1130,6 @@ package object config {
       .doc("Whether to use the old protocol while doing the shuffle block fetching. " +
         "It is only enabled while we need the compatibility in the scenario of new Spark " +
         "version job fetching shuffle blocks from old version external shuffle service.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1354,7 +1139,6 @@ package object config {
         s"shuffle `${SHUFFLE_SERVICE_ENABLED.key}` is enabled), shuffle " +
         "blocks requested from those block managers which are running on the same host are read " +
         "from the disk directly instead of being fetched as remote blocks over the network.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1365,7 +1149,6 @@ package object config {
         "unbounded store. This cache will be used to avoid the network in case of fetching disk " +
         s"persisted RDD blocks or shuffle blocks " +
         s"(when `${SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED.key}` is set) from the same host.")
-      .version("3.0.0")
       .intConf
       .createWithDefault(1000)
 
@@ -1373,7 +1156,6 @@ package object config {
     ConfigBuilder("spark.storage.memoryMapLimitForTests")
       .internal()
       .doc("For testing only, controls the size of chunks when memory mapping a file")
-      .version("2.3.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
 
@@ -1383,7 +1165,6 @@ package object config {
         "coordinator didn't receive all the sync messages from barrier tasks within the " +
         "configured time, throw a SparkException to fail all the tasks. The default value is set " +
         "to 31536000(3600 * 24 * 365) so the barrier() call shall wait for one year.")
-      .version("2.4.0")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(v => v > 0, "The value should be a positive time value.")
       .createWithDefaultString("365d")
@@ -1392,7 +1173,6 @@ package object config {
     ConfigBuilder("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
       .doc("The timeout in seconds to wait to acquire a new executor and schedule a task " +
         "before aborting a TaskSet which is unschedulable because of being completely blacklisted.")
-      .version("2.4.1")
       .timeConf(TimeUnit.SECONDS)
       .checkValue(v => v >= 0, "The value should be a non negative time value.")
       .createWithDefault(120)
@@ -1407,7 +1187,6 @@ package object config {
         "configured max failure times for a job then fail current job submission. Note this " +
         "config only applies to jobs that contain one or more barrier stages, we won't perform " +
         "the check on non-barrier jobs.")
-      .version("2.4.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("15s")
 
@@ -1421,7 +1200,6 @@ package object config {
         "max failure times for a job then fail current job submission. Note this config only " +
         "applies to jobs that contain one or more barrier stages, we won't perform the check on " +
         "non-barrier jobs.")
-      .version("2.4.0")
       .intConf
       .checkValue(v => v > 0, "The max failures should be a positive value.")
       .createWithDefault(40)
@@ -1429,21 +1207,18 @@ package object config {
   private[spark] val UNSAFE_EXCEPTION_ON_MEMORY_LEAK =
     ConfigBuilder("spark.unsafe.exceptionOnMemoryLeak")
       .internal()
-      .version("1.4.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val UNSAFE_SORTER_SPILL_READ_AHEAD_ENABLED =
     ConfigBuilder("spark.unsafe.sorter.spill.read.ahead.enabled")
       .internal()
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val UNSAFE_SORTER_SPILL_READER_BUFFER_SIZE =
     ConfigBuilder("spark.unsafe.sorter.spill.reader.buffer.size")
       .internal()
-      .version("2.1.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(v => 1024 * 1024 <= v && v <= MAX_BUFFER_SIZE_BYTES,
         s"The value must be in allowed range [1,048,576, ${MAX_BUFFER_SIZE_BYTES}].")
@@ -1456,83 +1231,63 @@ package object config {
       .withPrepended(DEFAULT_PLUGINS_LIST, separator = ",")
       .doc("Comma-separated list of class names implementing " +
         "org.apache.spark.api.plugin.SparkPlugin to load into the application.")
-      .version("3.0.0")
       .stringConf
       .toSequence
       .createWithDefault(Nil)
 
   private[spark] val CLEANER_PERIODIC_GC_INTERVAL =
     ConfigBuilder("spark.cleaner.periodicGC.interval")
-      .version("1.6.0")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefaultString("30min")
 
   private[spark] val CLEANER_REFERENCE_TRACKING =
     ConfigBuilder("spark.cleaner.referenceTracking")
-      .version("1.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING =
     ConfigBuilder("spark.cleaner.referenceTracking.blocking")
-      .version("1.0.0")
       .booleanConf
       .createWithDefault(true)
 
   private[spark] val CLEANER_REFERENCE_TRACKING_BLOCKING_SHUFFLE =
     ConfigBuilder("spark.cleaner.referenceTracking.blocking.shuffle")
-      .version("1.1.1")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val CLEANER_REFERENCE_TRACKING_CLEAN_CHECKPOINTS =
     ConfigBuilder("spark.cleaner.referenceTracking.cleanCheckpoints")
-      .version("1.4.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val EXECUTOR_LOGS_ROLLING_STRATEGY =
-    ConfigBuilder("spark.executor.logs.rolling.strategy")
-      .version("1.1.0")
-      .stringConf
-      .createWithDefault("")
+    ConfigBuilder("spark.executor.logs.rolling.strategy").stringConf.createWithDefault("")
 
   private[spark] val EXECUTOR_LOGS_ROLLING_TIME_INTERVAL =
-    ConfigBuilder("spark.executor.logs.rolling.time.interval")
-      .version("1.1.0")
-      .stringConf
-      .createWithDefault("daily")
+    ConfigBuilder("spark.executor.logs.rolling.time.interval").stringConf.createWithDefault("daily")
 
   private[spark] val EXECUTOR_LOGS_ROLLING_MAX_SIZE =
     ConfigBuilder("spark.executor.logs.rolling.maxSize")
-      .version("1.4.0")
       .stringConf
       .createWithDefault((1024 * 1024).toString)
 
   private[spark] val EXECUTOR_LOGS_ROLLING_MAX_RETAINED_FILES =
-    ConfigBuilder("spark.executor.logs.rolling.maxRetainedFiles")
-      .version("1.1.0")
-      .intConf
-      .createWithDefault(-1)
+    ConfigBuilder("spark.executor.logs.rolling.maxRetainedFiles").intConf.createWithDefault(-1)
 
   private[spark] val EXECUTOR_LOGS_ROLLING_ENABLE_COMPRESSION =
     ConfigBuilder("spark.executor.logs.rolling.enableCompression")
-      .version("2.0.2")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val MASTER_REST_SERVER_ENABLED = ConfigBuilder("spark.master.rest.enabled")
-    .version("1.3.0")
     .booleanConf
     .createWithDefault(false)
 
   private[spark] val MASTER_REST_SERVER_PORT = ConfigBuilder("spark.master.rest.port")
-    .version("1.3.0")
     .intConf
     .createWithDefault(6066)
 
   private[spark] val MASTER_UI_PORT = ConfigBuilder("spark.master.ui.port")
-    .version("1.1.0")
     .intConf
     .createWithDefault(8080)
 
@@ -1541,7 +1296,6 @@ package object config {
       .doc("Block size in bytes used in Snappy compression, in the case when " +
         "Snappy compression codec is used. Lowering this block size " +
         "will also lower shuffle memory usage when Snappy is used")
-      .version("1.4.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("32k")
 
@@ -1550,7 +1304,6 @@ package object config {
       .doc("Block size in bytes used in LZ4 compression, in the case when LZ4 compression" +
         "codec is used. Lowering this block size will also lower shuffle memory " +
         "usage when LZ4 is used.")
-      .version("1.4.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("32k")
 
@@ -1560,7 +1313,6 @@ package object config {
         "broadcast variables and shuffle outputs. By default, Spark provides four codecs: " +
         "lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
         "the codec")
-      .version("0.8.0")
       .stringConf
       .createWithDefaultString("lz4")
 
@@ -1570,7 +1322,6 @@ package object config {
         "compression codec is used. Lowering this size will lower the shuffle " +
         "memory usage when Zstd is used, but it might increase the compression " +
         "cost because of excessive JNI call overhead")
-      .version("2.3.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("32k")
 
@@ -1578,7 +1329,6 @@ package object config {
     ConfigBuilder("spark.io.compression.zstd.level")
       .doc("Compression level for Zstd compression codec. Increasing the compression " +
         "level will result in better compression at the expense of more CPU and memory")
-      .version("2.3.0")
       .intConf
       .createWithDefault(1)
 
@@ -1587,7 +1337,6 @@ package object config {
       .internal()
       .doc("If the size in bytes of a file loaded by Spark exceeds this threshold, " +
         "a warning is logged with the possible reasons.")
-      .version("3.0.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefault(1024 * 1024 * 1024)
 
@@ -1596,34 +1345,28 @@ package object config {
       .doc("The codec used to compress event log. By default, Spark provides four codecs: " +
         "lz4, lzf, snappy, and zstd. You can also use fully qualified class names to specify " +
         "the codec. If this is not given, spark.io.compression.codec will be used.")
-      .version("3.0.0")
       .fallbackConf(IO_COMPRESSION_CODEC)
 
   private[spark] val BUFFER_SIZE =
     ConfigBuilder("spark.buffer.size")
-      .version("0.5.0")
       .intConf
       .checkValue(_ >= 0, "The buffer size must not be negative")
       .createWithDefault(65536)
 
   private[spark] val LOCALITY_WAIT_PROCESS = ConfigBuilder("spark.locality.wait.process")
-    .version("0.8.0")
     .fallbackConf(LOCALITY_WAIT)
 
   private[spark] val LOCALITY_WAIT_NODE = ConfigBuilder("spark.locality.wait.node")
-    .version("0.8.0")
     .fallbackConf(LOCALITY_WAIT)
 
   private[spark] val LOCALITY_WAIT_RACK = ConfigBuilder("spark.locality.wait.rack")
-    .version("0.8.0")
     .fallbackConf(LOCALITY_WAIT)
 
-  private[spark] val REDUCER_MAX_SIZE_IN_FLIGHT = ConfigBuilder("spark.reducer.maxSizeInFlight")
+    private[spark] val REDUCER_MAX_SIZE_IN_FLIGHT = ConfigBuilder("spark.reducer.maxSizeInFlight")
     .doc("Maximum size of map outputs to fetch simultaneously from each reduce task, " +
       "in MiB unless otherwise specified. Since each output requires us to create a " +
       "buffer to receive it, this represents a fixed memory overhead per reduce task, " +
       "so keep it small unless you have a large amount of memory")
-    .version("1.4.0")
     .bytesConf(ByteUnit.MiB)
     .createWithDefaultString("48m")
 
@@ -1633,14 +1376,12 @@ package object config {
       "it might lead to very large number of inbound connections to one or more nodes, " +
       "causing the workers to fail under load. By allowing it to limit the number of " +
       "fetch requests, this scenario can be mitigated")
-    .version("2.0.0")
     .intConf
     .createWithDefault(Int.MaxValue)
 
   private[spark] val BROADCAST_COMPRESS = ConfigBuilder("spark.broadcast.compress")
     .doc("Whether to compress broadcast variables before sending them. " +
       "Generally a good idea. Compression will use spark.io.compression.codec")
-    .version("0.6.0")
     .booleanConf.createWithDefault(true)
 
   private[spark] val BROADCAST_BLOCKSIZE = ConfigBuilder("spark.broadcast.blockSize")
@@ -1648,7 +1389,6 @@ package object config {
       "KiB unless otherwise specified. Too large a value decreases " +
       "parallelism during broadcast (makes it slower); however, " +
       "if it is too small, BlockManager might take a performance hit")
-    .version("0.5.0")
     .bytesConf(ByteUnit.KiB)
     .createWithDefaultString("4m")
 
@@ -1658,14 +1398,12 @@ package object config {
       "corrupted blocks, at the cost of computing and sending a little " +
       "more data. It's possible to disable it if the network has other " +
       "mechanisms to guarantee data won't be corrupted during broadcast")
-    .version("2.1.1")
     .booleanConf.createWithDefault(true)
 
   private[spark] val BROADCAST_FOR_UDF_COMPRESSION_THRESHOLD =
     ConfigBuilder("spark.broadcast.UDFCompressionThreshold")
       .doc("The threshold at which user-defined functions (UDFs) and Python RDD commands " +
         "are compressed by broadcast in bytes unless otherwise specified")
-      .version("3.0.0")
       .bytesConf(ByteUnit.BYTE)
       .checkValue(v => v >= 0, "The threshold should be non-negative.")
       .createWithDefault(1L * 1024 * 1024)
@@ -1676,111 +1414,92 @@ package object config {
       "or StorageLevel.MEMORY_ONLY in Python). Can save substantial " +
       "space at the cost of some extra CPU time. " +
       "Compression will use spark.io.compression.codec")
-    .version("0.6.0")
     .booleanConf.createWithDefault(false)
 
   private[spark] val RDD_PARALLEL_LISTING_THRESHOLD =
     ConfigBuilder("spark.rdd.parallelListingThreshold")
-      .version("2.0.0")
       .intConf
       .createWithDefault(10)
 
   private[spark] val RDD_LIMIT_SCALE_UP_FACTOR =
     ConfigBuilder("spark.rdd.limit.scaleUpFactor")
-      .version("2.1.0")
       .intConf
       .createWithDefault(4)
 
   private[spark] val SERIALIZER = ConfigBuilder("spark.serializer")
-    .version("0.5.0")
     .stringConf
     .createWithDefault("org.apache.spark.serializer.JavaSerializer")
 
   private[spark] val SERIALIZER_OBJECT_STREAM_RESET =
     ConfigBuilder("spark.serializer.objectStreamReset")
-      .version("1.0.0")
       .intConf
       .createWithDefault(100)
 
   private[spark] val SERIALIZER_EXTRA_DEBUG_INFO = ConfigBuilder("spark.serializer.extraDebugInfo")
-    .version("1.3.0")
     .booleanConf
     .createWithDefault(true)
 
   private[spark] val JARS = ConfigBuilder("spark.jars")
-    .version("0.9.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val FILES = ConfigBuilder("spark.files")
-    .version("1.0.0")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val SUBMIT_DEPLOY_MODE = ConfigBuilder("spark.submit.deployMode")
-    .version("1.5.0")
     .stringConf
     .createWithDefault("client")
 
   private[spark] val SUBMIT_PYTHON_FILES = ConfigBuilder("spark.submit.pyFiles")
-    .version("1.0.1")
     .stringConf
     .toSequence
     .createWithDefault(Nil)
 
   private[spark] val SCHEDULER_ALLOCATION_FILE =
     ConfigBuilder("spark.scheduler.allocation.file")
-      .version("0.8.1")
       .stringConf
       .createOptional
 
   private[spark] val SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO =
     ConfigBuilder("spark.scheduler.minRegisteredResourcesRatio")
-      .version("1.1.1")
       .doubleConf
       .createOptional
 
   private[spark] val SCHEDULER_MAX_REGISTERED_RESOURCE_WAITING_TIME =
     ConfigBuilder("spark.scheduler.maxRegisteredResourcesWaitingTime")
-      .version("1.1.1")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("30s")
 
   private[spark] val SCHEDULER_MODE =
     ConfigBuilder("spark.scheduler.mode")
-      .version("0.8.0")
       .stringConf
       .createWithDefault(SchedulingMode.FIFO.toString)
 
   private[spark] val SCHEDULER_REVIVE_INTERVAL =
     ConfigBuilder("spark.scheduler.revive.interval")
-      .version("0.8.1")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val SPECULATION_ENABLED =
     ConfigBuilder("spark.speculation")
-      .version("0.6.0")
       .booleanConf
       .createWithDefault(false)
 
   private[spark] val SPECULATION_INTERVAL =
     ConfigBuilder("spark.speculation.interval")
-      .version("0.6.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(100)
 
   private[spark] val SPECULATION_MULTIPLIER =
     ConfigBuilder("spark.speculation.multiplier")
-      .version("0.6.0")
       .doubleConf
       .createWithDefault(1.5)
 
   private[spark] val SPECULATION_QUANTILE =
     ConfigBuilder("spark.speculation.quantile")
-      .version("0.6.0")
       .doubleConf
       .createWithDefault(0.75)
 
@@ -1794,19 +1513,16 @@ package object config {
         "large enough. E.g. tasks might be re-launched if there are enough successful runs " +
         "even though the threshold hasn't been reached. The number of slots is computed based " +
         "on the conf values of spark.executor.cores and spark.task.cpus minimum 1.")
-      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional
 
   private[spark] val STAGING_DIR = ConfigBuilder("spark.yarn.stagingDir")
     .doc("Staging directory used while submitting applications.")
-    .version("2.0.0")
     .stringConf
     .createOptional
 
   private[spark] val BUFFER_PAGESIZE = ConfigBuilder("spark.buffer.pageSize")
     .doc("The amount of memory used per page in bytes")
-    .version("1.5.0")
     .bytesConf(ByteUnit.BYTE)
     .createOptional
 


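    The hunks above only remove the `.version("...")` call from each ConfigBuilder chain; every key, doc string and default stays as it was. For orientation, the snippet below is a minimal, illustrative sketch (not part of this commit) showing how a few of the affected core configs are set through the public SparkConf API; the object name and values are made up for the example.

    import org.apache.spark.{SparkConf, SparkContext}

    object CoreConfSketch {
      def main(args: Array[String]): Unit = {
        // Keys below all appear in the hunks above; the values are illustrative.
        val conf = new SparkConf()
          .setMaster("local[*]")
          .setAppName("core-conf-sketch")
          .set("spark.io.compression.codec", "lz4")      // codec for shuffle/broadcast/RDD blocks
          .set("spark.speculation", "true")              // enable speculative task execution
          .set("spark.reducer.maxSizeInFlight", "48m")   // per-reduce-task fetch buffer cap

        val sc = new SparkContext(conf)
        println(sc.getConf.get("spark.io.compression.codec"))
        sc.stop()
      }
    }

    Entries defined with createWithDefault, like most of those above, simply fall back to the shown defaults when the key is not set explicitly.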


[spark] 02/02: Revert "[SPARK-30841][SQL][DOC][3.0] Add version information to the configuration of SQL"

Posted by gu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit 26d84653ed886e66067b25bb784f7dd5c289c53b
Author: HyukjinKwon <gu...@apache.org>
AuthorDate: Tue Apr 7 20:29:22 2020 +0900

    Revert "[SPARK-30841][SQL][DOC][3.0] Add version information to the configuration of SQL"
    
    This reverts commit af7fa9d12622c3b2b169c4a58d143bc21ed9e784.
---
 .../org/apache/spark/sql/internal/SQLConf.scala    | 267 +--------------------
 1 file changed, 6 insertions(+), 261 deletions(-)
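
    This second revert strips the same `.version()` metadata from the SQLConf entries shown in the hunks below; as with the core configs, the definitions themselves are untouched. The sketch below (illustrative only, not from this commit) shows how two of the affected SQL configs can be set from application code, both at session build time and at runtime.

    import org.apache.spark.sql.SparkSession

    object SqlConfSketch {
      def main(args: Array[String]): Unit = {
        // Keys below all appear in the SQLConf hunks that follow; the values are illustrative.
        val spark = SparkSession.builder()
          .master("local[*]")
          .appName("sql-conf-sketch")
          .config("spark.sql.shuffle.partitions", "200")
          .getOrCreate()

        // SQL configs can also be changed at runtime through the session's RuntimeConfig.
        spark.conf.set("spark.sql.adaptive.enabled", "true")
        println(spark.conf.get("spark.sql.adaptive.enabled"))

        spark.stop()
      }
    }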

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 7a10aba..9f9e556 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -177,7 +177,6 @@ object SQLConf {
   val ANALYZER_MAX_ITERATIONS = buildConf("spark.sql.analyzer.maxIterations")
     .internal()
     .doc("The max number of iterations the analyzer runs.")
-    .version("3.0.0")
     .intConf
     .createWithDefault(100)
 
@@ -186,14 +185,12 @@ object SQLConf {
       "specified by their rule names and separated by comma. It is not guaranteed that all the " +
       "rules in this configuration will eventually be excluded, as some rules are necessary " +
       "for correctness. The optimizer will log the rules that have indeed been excluded.")
-    .version("2.4.0")
     .stringConf
     .createOptional
 
   val OPTIMIZER_MAX_ITERATIONS = buildConf("spark.sql.optimizer.maxIterations")
     .internal()
     .doc("The max number of iterations the optimizer runs.")
-    .version("2.0.0")
     .intConf
     .createWithDefault(100)
 
@@ -201,7 +198,6 @@ object SQLConf {
     buildConf("spark.sql.optimizer.inSetConversionThreshold")
       .internal()
       .doc("The threshold of set size for InSet conversion.")
-      .version("2.0.0")
       .intConf
       .createWithDefault(10)
 
@@ -210,7 +206,6 @@ object SQLConf {
       .internal()
       .doc("Configures the max set size in InSet for which Spark will generate code with " +
         "switch statements. This is applicable only to bytes, shorts, ints, dates.")
-      .version("3.0.0")
       .intConf
       .checkValue(threshold => threshold >= 0 && threshold <= 600, "The max set size " +
         "for using switch statements in InSet must be non-negative and less than or equal to 600")
@@ -221,7 +216,6 @@ object SQLConf {
     .doc("Configures the log level for logging the change from the original plan to the new " +
       "plan after a rule or batch is applied. The value can be 'trace', 'debug', 'info', " +
       "'warn', or 'error'. The default log level is 'trace'.")
-    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValue(logLevel => Set("TRACE", "DEBUG", "INFO", "WARN", "ERROR").contains(logLevel),
@@ -233,7 +227,6 @@ object SQLConf {
     .internal()
     .doc("Configures a list of rules to be logged in the optimizer, in which the rules are " +
       "specified by their rule names and separated by comma.")
-    .version("3.0.0")
     .stringConf
     .createOptional
 
@@ -241,14 +234,12 @@ object SQLConf {
     .internal()
     .doc("Configures a list of batches to be logged in the optimizer, in which the batches " +
       "are specified by their batch names and separated by comma.")
-    .version("3.0.0")
     .stringConf
     .createOptional
 
   val DYNAMIC_PARTITION_PRUNING_ENABLED =
     buildConf("spark.sql.optimizer.dynamicPartitionPruning.enabled")
       .doc("When true, we will generate predicate for partition column when it's used as join key")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -258,7 +249,6 @@ object SQLConf {
       .doc("When true, distinct count statistics will be used for computing the data size of the " +
         "partitioned table after dynamic partition pruning, in order to evaluate if it is worth " +
         "adding an extra subquery as the pruning filter if broadcast reuse is not applicable.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -269,7 +259,6 @@ object SQLConf {
       "used as the fallback filter ratio for computing the data size of the partitioned table " +
       "after dynamic partition pruning, in order to evaluate if it is worth adding an extra " +
       "subquery as the pruning filter if broadcast reuse is not applicable.")
-    .version("3.0.0")
     .doubleConf
     .createWithDefault(0.5)
 
@@ -278,21 +267,18 @@ object SQLConf {
       .internal()
       .doc("When true, dynamic partition pruning will only apply when the broadcast exchange of " +
         "a broadcast hash join operation can be reused as the dynamic pruning filter.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   val COMPRESS_CACHED = buildConf("spark.sql.inMemoryColumnarStorage.compressed")
     .doc("When set to true Spark SQL will automatically select a compression codec for each " +
       "column based on statistics of the data.")
-    .version("1.0.1")
     .booleanConf
     .createWithDefault(true)
 
   val COLUMN_BATCH_SIZE = buildConf("spark.sql.inMemoryColumnarStorage.batchSize")
     .doc("Controls the size of batches for columnar caching.  Larger batch sizes can improve " +
       "memory utilization and compression, but risk OOMs when caching data.")
-    .version("1.1.1")
     .intConf
     .createWithDefault(10000)
 
@@ -300,7 +286,6 @@ object SQLConf {
     buildConf("spark.sql.inMemoryColumnarStorage.partitionPruning")
       .internal()
       .doc("When true, enable partition pruning for in-memory columnar tables.")
-      .version("1.2.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -308,14 +293,12 @@ object SQLConf {
     buildConf("spark.sql.inMemoryTableScanStatistics.enable")
       .internal()
       .doc("When true, enable in-memory table scan accumulators.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val CACHE_VECTORIZED_READER_ENABLED =
     buildConf("spark.sql.inMemoryColumnarStorage.enableVectorizedReader")
       .doc("Enables vectorized reader for columnar caching.")
-      .version("2.3.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -323,14 +306,12 @@ object SQLConf {
     buildConf("spark.sql.columnVector.offheap.enabled")
       .internal()
       .doc("When true, use OffHeapColumnVector in ColumnarBatch.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
   val PREFER_SORTMERGEJOIN = buildConf("spark.sql.join.preferSortMergeJoin")
     .internal()
     .doc("When true, prefer sort merge join over shuffle hash join.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -339,7 +320,6 @@ object SQLConf {
     .doc("When true, enable use of radix sort when possible. Radix sort is much faster but " +
       "requires additional memory to be reserved up-front. The memory overhead may be " +
       "significant when sorting very small rows (up to 50% more in this case).")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -350,7 +330,6 @@ object SQLConf {
       "command `ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan` has been " +
       "run, and file-based data source tables where the statistics are computed directly on " +
       "the files of data.")
-    .version("1.1.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefaultString("10MB")
 
@@ -359,7 +338,6 @@ object SQLConf {
     .doc("Minimal increase rate in number of partitions between attempts when executing a take " +
       "on a query. Higher values lead to more partitions read. Lower values might lead to " +
       "longer execution times as more jobs will be run")
-    .version("2.1.1")
     .intConf
     .createWithDefault(4)
 
@@ -367,7 +345,6 @@ object SQLConf {
     buildConf("spark.sql.hive.advancedPartitionPredicatePushdown.enabled")
       .internal()
       .doc("When true, advanced partition predicate pushdown into Hive metastore is enabled.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -375,7 +352,6 @@ object SQLConf {
     .doc("The default number of partitions to use when shuffling data for joins or aggregations. " +
       "Note: For structured streaming, this configuration cannot be changed between query " +
       "restarts from the same checkpoint location.")
-    .version("1.1.0")
     .intConf
     .checkValue(_ > 0, "The value of spark.sql.shuffle.partitions must be positive")
     .createWithDefault(200)
@@ -384,14 +360,12 @@ object SQLConf {
     buildConf("spark.sql.adaptive.shuffle.targetPostShuffleInputSize")
       .internal()
       .doc("(Deprecated since Spark 3.0)")
-      .version("1.6.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("64MB")
 
   val ADAPTIVE_EXECUTION_ENABLED = buildConf("spark.sql.adaptive.enabled")
     .doc("When true, enable adaptive query execution, which re-optimizes the query plan in the " +
       "middle of query execution, based on accurate runtime statistics.")
-    .version("1.6.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -401,7 +375,6 @@ object SQLConf {
       "sub-queries. By setting this config to true (together with " +
       s"'${ADAPTIVE_EXECUTION_ENABLED.key}' set to true), Spark will force apply adaptive query " +
       "execution for all supported queries.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -409,7 +382,6 @@ object SQLConf {
     .internal()
     .doc("Configures the log level for adaptive execution logging of plan changes. The value " +
       "can be 'trace', 'debug', 'info', 'warn', or 'error'. The default log level is 'debug'.")
-    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(Set("TRACE", "DEBUG", "INFO", "WARN", "ERROR"))
@@ -420,7 +392,6 @@ object SQLConf {
       .doc("The advisory size in bytes of the shuffle partition during adaptive optimization " +
         s"(when ${ADAPTIVE_EXECUTION_ENABLED.key} is true). It takes effect when Spark " +
         "coalesces small shuffle partitions or splits skewed shuffle partition.")
-      .version("3.0.0")
       .fallbackConf(SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE)
 
   val COALESCE_PARTITIONS_ENABLED =
@@ -428,7 +399,6 @@ object SQLConf {
       .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark will coalesce " +
         "contiguous shuffle partitions according to the target size (specified by " +
         s"'${ADVISORY_PARTITION_SIZE_IN_BYTES.key}'), to avoid too many small tasks.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -438,7 +408,6 @@ object SQLConf {
         "value is the default parallelism of the Spark cluster. This configuration only " +
         s"has an effect when '${ADAPTIVE_EXECUTION_ENABLED.key}' and " +
         s"'${COALESCE_PARTITIONS_ENABLED.key}' are both true.")
-      .version("3.0.0")
       .intConf
       .checkValue(_ > 0, "The minimum number of partitions must be positive.")
       .createOptional
@@ -449,7 +418,6 @@ object SQLConf {
         s"${SHUFFLE_PARTITIONS.key}. This configuration only has an effect when " +
         s"'${ADAPTIVE_EXECUTION_ENABLED.key}' and '${COALESCE_PARTITIONS_ENABLED.key}' " +
         "are both true.")
-      .version("3.0.0")
       .intConf
       .checkValue(_ > 0, "The initial number of partitions must be positive.")
       .createOptional
@@ -464,7 +432,6 @@ object SQLConf {
         s"'${COALESCE_PARTITIONS_ENABLED.key}' are both true. This feature also depends " +
         "on a relocatable serializer, the concatenation support codec in use and the new version " +
         "shuffle fetch protocol.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -473,7 +440,6 @@ object SQLConf {
       .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark tries to use local " +
         "shuffle reader to read the shuffle data when the shuffle partitioning is not needed, " +
         "for example, after converting sort-merge join to broadcast-hash join.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -482,7 +448,6 @@ object SQLConf {
       .doc(s"When true and '${ADAPTIVE_EXECUTION_ENABLED.key}' is true, Spark dynamically " +
         "handles skew in sort-merge join by splitting (and replicating if needed) skewed " +
         "partitions.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -491,7 +456,6 @@ object SQLConf {
       .doc("A partition is considered as skewed if its size is larger than this factor " +
         "multiplying the median partition size and also larger than " +
         "'spark.sql.adaptive.skewJoin.skewedPartitionThresholdInBytes'")
-      .version("3.0.0")
       .intConf
       .checkValue(_ > 0, "The skew factor must be positive.")
       .createWithDefault(10)
@@ -502,7 +466,6 @@ object SQLConf {
         s"threshold and also larger than '${SKEW_JOIN_SKEWED_PARTITION_FACTOR.key}' " +
         "multiplying the median partition size. Ideally this config should be set larger " +
         s"than '${ADVISORY_PARTITION_SIZE_IN_BYTES.key}'.")
-      .version("3.0.0")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("256MB")
 
@@ -513,7 +476,6 @@ object SQLConf {
         "considered as the build side of a broadcast-hash join in adaptive execution regardless " +
         "of its size.This configuration only has an effect when " +
         s"'${ADAPTIVE_EXECUTION_ENABLED.key}' is true.")
-      .version("3.0.0")
       .doubleConf
       .checkValue(_ >= 0, "The non-empty partition ratio must be positive number.")
       .createWithDefault(0.2)
@@ -522,7 +484,6 @@ object SQLConf {
     buildConf("spark.sql.subexpressionElimination.enabled")
       .internal()
       .doc("When true, common subexpressions will be eliminated.")
-      .version("1.6.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -530,7 +491,6 @@ object SQLConf {
     .internal()
     .doc("Whether the query analyzer should be case sensitive or not. " +
       "Default to case insensitive. It is highly discouraged to turn on case sensitive mode.")
-    .version("1.4.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -540,7 +500,6 @@ object SQLConf {
       "plan to optimize them. Constraint propagation can sometimes be computationally expensive " +
       "for certain kinds of query plans (such as those with a large number of predicates and " +
       "aliases) which might negatively impact overall runtime.")
-    .version("2.2.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -549,7 +508,6 @@ object SQLConf {
     .doc("When true, string literals (including regex patterns) remain escaped in our SQL " +
       "parser. The default is false since Spark 2.0. Setting it to true can restore the behavior " +
       "prior to Spark 2.0.")
-    .version("2.2.1")
     .booleanConf
     .createWithDefault(false)
 
@@ -558,7 +516,6 @@ object SQLConf {
     .doc("When estimating the output data size of a table scan, multiply the file size with this " +
       "factor as the estimated data size, in case the data is compressed in the file and lead to" +
       " a heavily underestimated result.")
-    .version("2.3.1")
     .doubleConf
     .checkValue(_ > 0, "the value of fileDataSizeFactor must be greater than 0")
     .createWithDefault(1.0)
@@ -567,7 +524,6 @@ object SQLConf {
     .doc("When true, the Parquet data source merges schemas collected from all data files, " +
          "otherwise the schema is picked from the summary file or a random data file " +
          "if no summary file is available.")
-    .version("1.5.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -576,7 +532,6 @@ object SQLConf {
          "summary files and we will ignore them when merging schema. Otherwise, if this is " +
          "false, which is the default, we will merge all part-files. This should be considered " +
          "as expert-only option, and shouldn't be enabled before knowing what it means exactly.")
-    .version("1.5.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -585,7 +540,6 @@ object SQLConf {
       "Spark SQL, do not differentiate between binary data and strings when writing out the " +
       "Parquet schema. This flag tells Spark SQL to interpret binary data as a string to provide " +
       "compatibility with these systems.")
-    .version("1.1.1")
     .booleanConf
     .createWithDefault(false)
 
@@ -594,7 +548,6 @@ object SQLConf {
       "Spark would also store Timestamp as INT96 because we need to avoid precision lost of the " +
       "nanoseconds field. This flag tells Spark SQL to interpret INT96 data as a timestamp to " +
       "provide compatibility with these systems.")
-    .version("1.3.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -602,7 +555,6 @@ object SQLConf {
     .doc("This controls whether timestamp adjustments should be applied to INT96 data when " +
       "converting to timestamps, for data written by Impala.  This is necessary because Impala " +
       "stores INT96 data with a different timezone offset than Hive & Spark.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -616,7 +568,6 @@ object SQLConf {
       "is a standard timestamp type in Parquet, which stores number of microseconds from the " +
       "Unix epoch. TIMESTAMP_MILLIS is also standard, but with millisecond precision, which " +
       "means Spark has to truncate the microsecond portion of its timestamp value.")
-    .version("2.3.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(ParquetOutputTimestampType.values.map(_.toString))
@@ -628,7 +579,6 @@ object SQLConf {
       "precedence would be `compression`, `parquet.compression`, " +
       "`spark.sql.parquet.compression.codec`. Acceptable values include: none, uncompressed, " +
       "snappy, gzip, lzo, brotli, lz4, zstd.")
-    .version("1.1.1")
     .stringConf
     .transform(_.toLowerCase(Locale.ROOT))
     .checkValues(Set("none", "uncompressed", "snappy", "gzip", "lzo", "lz4", "brotli", "zstd"))
@@ -636,7 +586,6 @@ object SQLConf {
 
   val PARQUET_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.parquet.filterPushdown")
     .doc("Enables Parquet filter push-down optimization when set to true.")
-    .version("1.2.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -644,7 +593,6 @@ object SQLConf {
     .doc("If true, enables Parquet filter push-down optimization for Date. " +
       s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
       "enabled.")
-    .version("2.4.0")
     .internal()
     .booleanConf
     .createWithDefault(true)
@@ -654,17 +602,15 @@ object SQLConf {
       .doc("If true, enables Parquet filter push-down optimization for Timestamp. " +
         s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
         "enabled and Timestamp stored as TIMESTAMP_MICROS or TIMESTAMP_MILLIS type.")
-      .version("2.4.0")
-      .internal()
-      .booleanConf
-      .createWithDefault(true)
+    .internal()
+    .booleanConf
+    .createWithDefault(true)
 
   val PARQUET_FILTER_PUSHDOWN_DECIMAL_ENABLED =
     buildConf("spark.sql.parquet.filterPushdown.decimal")
       .doc("If true, enables Parquet filter push-down optimization for Decimal. " +
         s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
         "enabled.")
-      .version("2.4.0")
       .internal()
       .booleanConf
       .createWithDefault(true)
@@ -674,7 +620,6 @@ object SQLConf {
     .doc("If true, enables Parquet filter push-down optimization for string startsWith function. " +
       s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
       "enabled.")
-    .version("2.4.0")
     .internal()
     .booleanConf
     .createWithDefault(true)
@@ -687,7 +632,6 @@ object SQLConf {
         "By setting this value to 0 this feature can be disabled. " +
         s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' is " +
         "enabled.")
-      .version("2.4.0")
       .internal()
       .intConf
       .checkValue(threshold => threshold >= 0, "The threshold must not be negative.")
@@ -699,7 +643,6 @@ object SQLConf {
       "systems such as Apache Hive and Apache Impala use. If false, the newer format in Parquet " +
       "will be used. For example, decimals will be written in int-based format. If Parquet " +
       "output is intended for use with systems that do not support this newer format, set to true.")
-    .version("1.6.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -709,7 +652,6 @@ object SQLConf {
       "of org.apache.parquet.hadoop.ParquetOutputCommitter. If it is not, then metadata " +
       "summaries will never be created, irrespective of the value of " +
       "parquet.summary.metadata.level")
-    .version("1.5.0")
     .internal()
     .stringConf
     .createWithDefault("org.apache.parquet.hadoop.ParquetOutputCommitter")
@@ -717,7 +659,6 @@ object SQLConf {
   val PARQUET_VECTORIZED_READER_ENABLED =
     buildConf("spark.sql.parquet.enableVectorizedReader")
       .doc("Enables vectorized parquet decoding.")
-      .version("2.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -727,14 +668,12 @@ object SQLConf {
       s"This configuration only has an effect when '${PARQUET_FILTER_PUSHDOWN_ENABLED.key}' " +
       "is enabled and the vectorized reader is not used. You can ensure the vectorized reader " +
       s"is not used by setting '${PARQUET_VECTORIZED_READER_ENABLED.key}' to false.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
   val PARQUET_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.parquet.columnarReaderBatchSize")
     .doc("The number of rows to include in a parquet vectorized reader batch. The number should " +
       "be carefully chosen to minimize overhead and avoid OOMs in reading data.")
-    .version("2.4.0")
     .intConf
     .createWithDefault(4096)
 
@@ -743,7 +682,6 @@ object SQLConf {
       "`orc.compress` is specified in the table-specific options/properties, the precedence " +
       "would be `compression`, `orc.compress`, `spark.sql.orc.compression.codec`." +
       "Acceptable values include: none, uncompressed, snappy, zlib, lzo.")
-    .version("2.3.0")
     .stringConf
     .transform(_.toLowerCase(Locale.ROOT))
     .checkValues(Set("none", "uncompressed", "snappy", "zlib", "lzo"))
@@ -752,7 +690,6 @@ object SQLConf {
   val ORC_IMPLEMENTATION = buildConf("spark.sql.orc.impl")
     .doc("When native, use the native version of ORC support instead of the ORC library in Hive. " +
       "It is 'hive' by default prior to Spark 2.4.")
-    .version("2.3.0")
     .internal()
     .stringConf
     .checkValues(Set("hive", "native"))
@@ -760,27 +697,23 @@ object SQLConf {
 
   val ORC_VECTORIZED_READER_ENABLED = buildConf("spark.sql.orc.enableVectorizedReader")
     .doc("Enables vectorized orc decoding.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(true)
 
   val ORC_VECTORIZED_READER_BATCH_SIZE = buildConf("spark.sql.orc.columnarReaderBatchSize")
     .doc("The number of rows to include in a orc vectorized reader batch. The number should " +
       "be carefully chosen to minimize overhead and avoid OOMs in reading data.")
-    .version("2.4.0")
     .intConf
     .createWithDefault(4096)
 
   val ORC_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.orc.filterPushdown")
     .doc("When true, enable filter pushdown for ORC files.")
-    .version("1.4.0")
     .booleanConf
     .createWithDefault(true)
 
   val ORC_SCHEMA_MERGING_ENABLED = buildConf("spark.sql.orc.mergeSchema")
     .doc("When true, the Orc data source merges schemas collected from all data files, " +
       "otherwise the schema is picked from a random data file.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -788,7 +721,6 @@ object SQLConf {
     .doc("When true, check all the partition paths under the table\'s root directory " +
          "when reading data stored in HDFS. This configuration will be deprecated in the future " +
          s"releases and replaced by ${SPARK_IGNORE_MISSING_FILES.key}.")
-    .version("1.4.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -798,7 +730,6 @@ object SQLConf {
            "unmatching partitions can be eliminated earlier. This only affects Hive tables " +
            "not converted to filesource relations (see HiveUtils.CONVERT_METASTORE_PARQUET and " +
            "HiveUtils.CONVERT_METASTORE_ORC for more information).")
-      .version("1.5.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -808,7 +739,6 @@ object SQLConf {
            "This includes both datasource and converted Hive tables. When partition management " +
            "is enabled, datasource tables store partition in the Hive metastore, and use the " +
            "metastore to prune partitions during query planning.")
-      .version("2.1.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -817,7 +747,6 @@ object SQLConf {
       .doc("When nonzero, enable caching of partition file metadata in memory. All tables share " +
            "a cache that can use up to specified num bytes for file metadata. This conf only " +
            "has an effect when hive filesource partition management is enabled.")
-      .version("2.1.1")
       .longConf
       .createWithDefault(250 * 1024 * 1024)
 
@@ -833,7 +762,6 @@ object SQLConf {
       "and write it back to the table properties), INFER_ONLY (infer the schema but don't " +
       "attempt to write it to the table properties) and NEVER_INFER (the default mode-- fallback " +
       "to using the case-insensitive metastore schema instead of inferring).")
-    .version("2.1.1")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(HiveCaseSensitiveInferenceMode.values.map(_.toString))
@@ -846,27 +774,23 @@ object SQLConf {
       "scanned are partition columns and the query has an aggregate operator that satisfies " +
       "distinct semantics. By default the optimization is disabled, since it may return " +
       "incorrect results when the files are empty.")
-    .version("2.1.1")
     .booleanConf
     .createWithDefault(false)
 
   val COLUMN_NAME_OF_CORRUPT_RECORD = buildConf("spark.sql.columnNameOfCorruptRecord")
     .doc("The name of internal column for storing raw/un-parsed JSON and CSV records that fail " +
       "to parse.")
-    .version("1.2.0")
     .stringConf
     .createWithDefault("_corrupt_record")
 
   val BROADCAST_TIMEOUT = buildConf("spark.sql.broadcastTimeout")
     .doc("Timeout in seconds for the broadcast wait time in broadcast joins.")
-    .version("1.3.0")
     .timeConf(TimeUnit.SECONDS)
     .createWithDefaultString(s"${5 * 60}")
 
   // This is only used for the thriftserver
   val THRIFTSERVER_POOL = buildConf("spark.sql.thriftserver.scheduler.pool")
     .doc("Set a Fair Scheduler pool for a JDBC client session.")
-    .version("1.1.1")
     .stringConf
     .createOptional
 
@@ -874,27 +798,23 @@ object SQLConf {
     buildConf("spark.sql.thriftServer.incrementalCollect")
       .internal()
       .doc("When true, enable incremental collection for execution in Thrift Server.")
-      .version("2.0.3")
       .booleanConf
       .createWithDefault(false)
 
   val THRIFTSERVER_UI_STATEMENT_LIMIT =
     buildConf("spark.sql.thriftserver.ui.retainedStatements")
       .doc("The number of SQL statements kept in the JDBC/ODBC web UI history.")
-      .version("1.4.0")
       .intConf
       .createWithDefault(200)
 
   val THRIFTSERVER_UI_SESSION_LIMIT = buildConf("spark.sql.thriftserver.ui.retainedSessions")
     .doc("The number of SQL client sessions kept in the JDBC/ODBC web UI history.")
-    .version("1.4.0")
     .intConf
     .createWithDefault(200)
 
   // This is used to set the default data source
   val DEFAULT_DATA_SOURCE_NAME = buildConf("spark.sql.sources.default")
     .doc("The default data source to use in input/output.")
-    .version("1.3.0")
     .stringConf
     .createWithDefault("parquet")
 
@@ -903,7 +823,6 @@ object SQLConf {
     .doc("When true, a table created by a Hive CTAS statement (no USING clause) " +
       "without specifying any storage property will be converted to a data source table, " +
       s"using the data source set by ${DEFAULT_DATA_SOURCE_NAME.key}.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -912,26 +831,22 @@ object SQLConf {
       .doc("When true, fast stats (number of files and total size of all files) will be gathered" +
         " in parallel while repairing table partitions to avoid the sequential listing in Hive" +
         " metastore.")
-      .version("2.0.1")
       .booleanConf
       .createWithDefault(true)
 
   val PARTITION_COLUMN_TYPE_INFERENCE =
     buildConf("spark.sql.sources.partitionColumnTypeInference.enabled")
       .doc("When true, automatically infer the data types for partitioned columns.")
-      .version("1.5.0")
       .booleanConf
       .createWithDefault(true)
 
   val BUCKETING_ENABLED = buildConf("spark.sql.sources.bucketing.enabled")
     .doc("When false, we will treat bucketed table as normal table")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val BUCKETING_MAX_BUCKETS = buildConf("spark.sql.sources.bucketing.maxBuckets")
     .doc("The maximum number of buckets allowed.")
-    .version("2.4.0")
     .intConf
     .checkValue(_ > 0, "the value of spark.sql.sources.bucketing.maxBuckets must be greater than 0")
     .createWithDefault(100000)
@@ -940,42 +855,36 @@ object SQLConf {
     .internal()
     .doc("When false, we will throw an error if a query contains a cartesian product without " +
         "explicit CROSS JOIN syntax.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val ORDER_BY_ORDINAL = buildConf("spark.sql.orderByOrdinal")
     .doc("When true, the ordinal numbers are treated as the position in the select list. " +
          "When false, the ordinal numbers in order/sort by clause are ignored.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val GROUP_BY_ORDINAL = buildConf("spark.sql.groupByOrdinal")
     .doc("When true, the ordinal numbers in group by clauses are treated as the position " +
       "in the select list. When false, the ordinal numbers are ignored.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val GROUP_BY_ALIASES = buildConf("spark.sql.groupByAliases")
     .doc("When true, aliases in a select list can be used in group by clauses. When false, " +
       "an analysis exception is thrown in the case.")
-    .version("2.2.0")
     .booleanConf
     .createWithDefault(true)
 
   // The output committer class used by data sources. The specified class needs to be a
   // subclass of org.apache.hadoop.mapreduce.OutputCommitter.
   val OUTPUT_COMMITTER_CLASS = buildConf("spark.sql.sources.outputCommitterClass")
-    .version("1.4.0")
     .internal()
     .stringConf
     .createOptional
 
   val FILE_COMMIT_PROTOCOL_CLASS =
     buildConf("spark.sql.sources.commitProtocolClass")
-      .version("2.1.1")
       .internal()
       .stringConf
       .createWithDefault(
@@ -987,7 +896,6 @@ object SQLConf {
         "of detected paths exceeds this value during partition discovery, it tries to list the " +
         "files with another Spark distributed job. This configuration is effective only when " +
         "using file-based sources such as Parquet, JSON and ORC.")
-      .version("1.5.0")
       .intConf
       .checkValue(parallel => parallel >= 0, "The maximum number of paths allowed for listing " +
         "files at driver side must not be negative")
@@ -997,7 +905,6 @@ object SQLConf {
     buildConf("spark.sql.sources.parallelPartitionDiscovery.parallelism")
       .doc("The number of parallelism to list a collection of path recursively, Set the " +
         "number to prevent file listing from generating too many tasks.")
-      .version("2.1.1")
       .internal()
       .intConf
       .createWithDefault(10000)
@@ -1009,7 +916,6 @@ object SQLConf {
         "schedule tasks to take advantage of data locality. It can be particularly " +
         "useful if data is read from a remote cluster so the scheduler could never " +
         "take advantage of locality anyway.")
-      .version("3.0.0")
       .internal()
       .booleanConf
       .createWithDefault(false)
@@ -1018,7 +924,6 @@ object SQLConf {
   // See SPARK-6231.
   val DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY =
     buildConf("spark.sql.selfJoinAutoResolveAmbiguity")
-      .version("1.4.0")
       .internal()
       .booleanConf
       .createWithDefault(true)
@@ -1026,14 +931,12 @@ object SQLConf {
   val FAIL_AMBIGUOUS_SELF_JOIN_ENABLED =
     buildConf("spark.sql.analyzer.failAmbiguousSelfJoin")
       .doc("When true, fail the Dataset query if it contains ambiguous self-join.")
-      .version("3.0.0")
       .internal()
       .booleanConf
       .createWithDefault(true)
 
   // Whether to retain group by columns or not in GroupedData.agg.
   val DATAFRAME_RETAIN_GROUP_COLUMNS = buildConf("spark.sql.retainGroupColumns")
-    .version("1.4.0")
     .internal()
     .booleanConf
     .createWithDefault(true)
@@ -1041,14 +944,12 @@ object SQLConf {
   val DATAFRAME_PIVOT_MAX_VALUES = buildConf("spark.sql.pivotMaxValues")
     .doc("When doing a pivot without specifying values for the pivot column this is the maximum " +
       "number of (distinct) values that will be collected without error.")
-    .version("1.6.0")
     .intConf
     .createWithDefault(10000)
 
   val RUN_SQL_ON_FILES = buildConf("spark.sql.runSQLOnFiles")
     .internal()
     .doc("When true, we could use `datasource`.`path` as table in SQL query.")
-    .version("1.6.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1056,7 +957,6 @@ object SQLConf {
     .internal()
     .doc("When true, the whole stage (of multiple operators) will be compiled into single java" +
       " method.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1065,7 +965,6 @@ object SQLConf {
     .internal()
     .doc("When true, embed the (whole-stage) codegen stage ID into " +
       "the class name of the generated class as a suffix")
-    .version("2.3.1")
     .booleanConf
     .createWithDefault(true)
 
@@ -1073,7 +972,6 @@ object SQLConf {
     .internal()
     .doc("The maximum number of fields (including nested fields) that will be supported before" +
       " deactivating whole-stage codegen.")
-    .version("2.0.0")
     .intConf
     .createWithDefault(100)
 
@@ -1083,7 +981,6 @@ object SQLConf {
       "interpreted if any compile error happens. Disabling fallback if `CODEGEN_ONLY`. " +
       "`NO_CODEGEN` skips codegen and goes interpreted path always. Note that " +
       "this config works only for tests.")
-    .version("2.4.0")
     .internal()
     .stringConf
     .checkValues(CodegenObjectFactoryMode.values.map(_.toString))
@@ -1093,14 +990,12 @@ object SQLConf {
     .internal()
     .doc("When true, (whole stage) codegen could be temporary disabled for the part of query that" +
       " fail to compile generated code")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val CODEGEN_LOGGING_MAX_LINES = buildConf("spark.sql.codegen.logging.maxLines")
     .internal()
     .doc("The maximum number of codegen lines to log when errors occur. Use -1 for unlimited.")
-    .version("2.3.0")
     .intConf
     .checkValue(maxLines => maxLines >= -1, "The maximum must be a positive integer, 0 to " +
       "disable logging or -1 to apply no limit.")
@@ -1114,7 +1009,6 @@ object SQLConf {
       "is the largest bytecode size possible for a valid Java method. When running on HotSpot, " +
       s"it may be preferable to set the value to ${CodeGenerator.DEFAULT_JVM_HUGE_METHOD_LIMIT} " +
       "to match HotSpot's implementation.")
-    .version("2.3.0")
     .intConf
     .createWithDefault(65535)
 
@@ -1126,7 +1020,6 @@ object SQLConf {
       "generated, so use the code length as metric. When running on HotSpot, a function's " +
       "bytecode should not go beyond 8KB, otherwise it will not be JITted; it also should not " +
       "be too small, otherwise there will be many function calls.")
-    .version("3.0.0")
     .intConf
     .checkValue(threshold => threshold > 0, "The threshold must be a positive integer.")
     .createWithDefault(1024)
@@ -1137,7 +1030,6 @@ object SQLConf {
       .doc("When true, whole stage codegen would put the logic of consuming rows of each " +
         "physical operator into individual methods, instead of a single big method. This can be " +
         "used to avoid oversized function that can miss the opportunity of JIT optimization.")
-      .version("2.3.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -1145,7 +1037,6 @@ object SQLConf {
     .doc("The maximum number of bytes to pack into a single partition when reading files. " +
       "This configuration is effective only when using file-based sources such as Parquet, JSON " +
       "and ORC.")
-    .version("2.0.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefaultString("128MB") // parquet.block.size
 
@@ -1156,7 +1047,6 @@ object SQLConf {
       " over estimated, then the partitions with small files will be faster than partitions with" +
       " bigger files (which is scheduled first). This configuration is effective only when using" +
       " file-based sources such as Parquet, JSON and ORC.")
-    .version("2.0.0")
     .longConf
     .createWithDefault(4 * 1024 * 1024)
 
@@ -1165,7 +1055,6 @@ object SQLConf {
       "encountering corrupted files and the contents that have been read will still be returned. " +
       "This configuration is effective only when using file-based sources such as Parquet, JSON " +
       "and ORC.")
-    .version("2.1.1")
     .booleanConf
     .createWithDefault(false)
 
@@ -1174,28 +1063,24 @@ object SQLConf {
       "encountering missing files and the contents that have been read will still be returned. " +
       "This configuration is effective only when using file-based sources such as Parquet, JSON " +
       "and ORC.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
   val MAX_RECORDS_PER_FILE = buildConf("spark.sql.files.maxRecordsPerFile")
     .doc("Maximum number of records to write out to a single file. " +
       "If this value is zero or negative, there is no limit.")
-    .version("2.2.0")
     .longConf
     .createWithDefault(0)
 
   val EXCHANGE_REUSE_ENABLED = buildConf("spark.sql.exchange.reuse")
     .internal()
     .doc("When true, the planner will try to find out duplicated exchanges and re-use them.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
   val SUBQUERY_REUSE_ENABLED = buildConf("spark.sql.execution.reuseSubquery")
     .internal()
     .doc("When true, the planner will try to find out duplicated subqueries and re-use them.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1207,7 +1092,6 @@ object SQLConf {
           "be a subclass of StateStoreProvider, and must have a zero-arg constructor. " +
           "Note: For structured streaming, this configuration cannot be changed between query " +
           "restarts from the same checkpoint location.")
-      .version("2.3.0")
       .stringConf
       .createWithDefault(
         "org.apache.spark.sql.execution.streaming.state.HDFSBackedStateStoreProvider")
@@ -1217,7 +1101,6 @@ object SQLConf {
       .internal()
       .doc("Minimum number of state store delta files that needs to be generated before they " +
         "consolidated into snapshots.")
-      .version("2.0.0")
       .intConf
       .createWithDefault(10)
 
@@ -1225,28 +1108,24 @@ object SQLConf {
     buildConf("spark.sql.streaming.flatMapGroupsWithState.stateFormatVersion")
       .internal()
       .doc("State format version used by flatMapGroupsWithState operation in a streaming query")
-      .version("2.4.0")
       .intConf
       .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2")
       .createWithDefault(2)
 
   val CHECKPOINT_LOCATION = buildConf("spark.sql.streaming.checkpointLocation")
     .doc("The default location for storing checkpoint data for streaming queries.")
-    .version("2.0.0")
     .stringConf
     .createOptional
 
   val FORCE_DELETE_TEMP_CHECKPOINT_LOCATION =
     buildConf("spark.sql.streaming.forceDeleteTempCheckpointLocation")
       .doc("When true, enable temporary checkpoint locations force delete.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val MIN_BATCHES_TO_RETAIN = buildConf("spark.sql.streaming.minBatchesToRetain")
     .internal()
     .doc("The minimum number of batches that must be retained and made recoverable.")
-    .version("2.1.1")
     .intConf
     .createWithDefault(100)
 
@@ -1256,7 +1135,6 @@ object SQLConf {
       "loading from files. The value adjusts a trade-off between memory usage vs cache miss: " +
       "'2' covers both success and direct failure cases, '1' covers only success case, " +
       "and '0' covers extreme case - disable cache to maximize memory size of executors.")
-    .version("2.4.0")
     .intConf
     .createWithDefault(2)
 
@@ -1266,7 +1144,6 @@ object SQLConf {
       .doc("State format version used by streaming aggregation operations in a streaming query. " +
         "State between versions are tend to be incompatible, so state format version shouldn't " +
         "be modified after running.")
-      .version("2.4.0")
       .intConf
       .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2")
       .createWithDefault(2)
@@ -1277,7 +1154,6 @@ object SQLConf {
       "If we find a concurrent active run for a streaming query (in the same or different " +
       "SparkSessions on the same cluster) and this flag is true, we will stop the old streaming " +
       "query run to start the new one.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1287,7 +1163,6 @@ object SQLConf {
       .doc("State format version used by streaming join operations in a streaming query. " +
         "State between versions are tend to be incompatible, so state format version shouldn't " +
         "be modified after running.")
-      .version("3.0.0")
       .intConf
       .checkValue(v => Set(1, 2).contains(v), "Valid versions are 1 and 2")
       .createWithDefault(2)
@@ -1297,7 +1172,6 @@ object SQLConf {
       .internal()
       .doc("When true, the logical plan for streaming query will be checked for unsupported" +
         " operations.")
-      .version("2.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1305,7 +1179,6 @@ object SQLConf {
     buildConf("spark.sql.variable.substitute")
       .doc("This enables substitution using syntax like `${var}`, `${system:var}`, " +
         "and `${env:var}`.")
-      .version("2.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1316,7 +1189,6 @@ object SQLConf {
         "inserted/looked-up at a 1st-level, small, fast map, and then fallback to a " +
         "2nd-level, larger, slower map when 1st level is full or keys cannot be found. " +
         "When disabled, records go directly to the 2nd level.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1324,7 +1196,6 @@ object SQLConf {
     buildConf("spark.sql.codegen.aggregate.map.vectorized.enable")
       .internal()
       .doc("Enable vectorized aggregate hash map. This is for testing/benchmarking only.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1334,7 +1205,6 @@ object SQLConf {
       .doc("When true, the code generator would split aggregate code into individual methods " +
         "instead of a single big method. This can be used to avoid oversized function that " +
         "can miss the opportunity of JIT optimization.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1346,7 +1216,6 @@ object SQLConf {
         "However the DAG depth may become too large and cause unexpected behavior. This " +
         "configuration puts a limit on this: when the depth of a view exceeds this value during " +
         "analysis, we terminate the resolution to avoid potential errors.")
-      .version("2.2.0")
       .intConf
       .checkValue(depth => depth > 0, "The maximum depth of a view reference in a nested view " +
         "must be positive.")
@@ -1354,7 +1223,6 @@ object SQLConf {
 
   val STREAMING_FILE_COMMIT_PROTOCOL_CLASS =
     buildConf("spark.sql.streaming.commitProtocolClass")
-      .version("2.1.0")
       .internal()
       .stringConf
       .createWithDefault("org.apache.spark.sql.execution.streaming.ManifestFileCommitProtocol")
@@ -1367,7 +1235,6 @@ object SQLConf {
         "'max' which chooses the maximum across multiple operators. " +
         "Note: This configuration cannot be changed between query restarts from the same " +
         "checkpoint location.")
-      .version("2.4.0")
       .stringConf
       .transform(_.toLowerCase(Locale.ROOT))
       .checkValue(
@@ -1382,7 +1249,6 @@ object SQLConf {
       .doc("In the case of ObjectHashAggregateExec, when the size of the in-memory hash map " +
         "grows too large, we will fall back to sort-based aggregation. This option sets a row " +
         "count threshold for the size of the hash map.")
-      .version("2.2.0")
       .intConf
       // We are trying to be conservative and use a relatively small default count threshold here
       // since the state object of some TypedImperativeAggregate function can be quite large (e.g.
@@ -1392,7 +1258,6 @@ object SQLConf {
   val USE_OBJECT_HASH_AGG = buildConf("spark.sql.execution.useObjectHashAggregateExec")
     .internal()
     .doc("Decides if we use ObjectHashAggregateExec")
-    .version("2.2.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1401,14 +1266,12 @@ object SQLConf {
       .doc("Whether to ignore null fields when generating JSON objects in JSON data source and " +
         "JSON functions such as to_json. " +
         "If false, it generates null for null fields in JSON objects.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   val FILE_SINK_LOG_DELETION = buildConf("spark.sql.streaming.fileSink.log.deletion")
     .internal()
     .doc("Whether to delete the expired log files in file stream sink.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1417,7 +1280,6 @@ object SQLConf {
       .internal()
       .doc("Number of log files after which all the previous files " +
         "are compacted into the next log file.")
-      .version("2.0.0")
       .intConf
       .createWithDefault(10)
 
@@ -1425,14 +1287,12 @@ object SQLConf {
     buildConf("spark.sql.streaming.fileSink.log.cleanupDelay")
       .internal()
       .doc("How long that a file is guaranteed to be visible for all readers.")
-      .version("2.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes
 
   val FILE_SOURCE_LOG_DELETION = buildConf("spark.sql.streaming.fileSource.log.deletion")
     .internal()
     .doc("Whether to delete the expired log files in file stream source.")
-    .version("2.0.1")
     .booleanConf
     .createWithDefault(true)
 
@@ -1441,7 +1301,6 @@ object SQLConf {
       .internal()
       .doc("Number of log files after which all the previous files " +
         "are compacted into the next log file.")
-      .version("2.0.1")
       .intConf
       .createWithDefault(10)
 
@@ -1449,7 +1308,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.fileSource.log.cleanupDelay")
       .internal()
       .doc("How long in milliseconds a file is guaranteed to be visible for all readers.")
-      .version("2.0.1")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(TimeUnit.MINUTES.toMillis(10)) // 10 minutes
 
@@ -1459,14 +1317,12 @@ object SQLConf {
       .doc("When true, force the schema of streaming file source to be nullable (including all " +
         "the fields). Otherwise, the schema might not be compatible with actual data, which " +
         "leads to corruptions.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
   val FILE_SOURCE_CLEANER_NUM_THREADS =
     buildConf("spark.sql.streaming.fileSource.cleaner.numThreads")
       .doc("Number of threads used in the file source completed file cleaner.")
-      .version("3.0.0")
       .intConf
       .createWithDefault(1)
 
@@ -1474,7 +1330,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.schemaInference")
       .internal()
       .doc("Whether file-based streaming sources will infer its own schema")
-      .version("2.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1482,7 +1337,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.pollingDelay")
       .internal()
       .doc("How long to delay polling new data when no data is available")
-      .version("2.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(10L)
 
@@ -1490,7 +1344,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.stopTimeout")
       .doc("How long to wait in milliseconds for the streaming execution thread to stop when " +
         "calling the streaming query's stop() method. 0 or negative values wait indefinitely.")
-      .version("3.0.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefaultString("0")
 
@@ -1498,7 +1351,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.noDataProgressEventInterval")
       .internal()
       .doc("How long to wait between two progress events when there is no data")
-      .version("2.1.1")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(10000L)
 
@@ -1507,21 +1359,18 @@ object SQLConf {
       .doc(
         "Whether streaming micro-batch engine will execute batches without data " +
           "for eager state management for stateful streaming queries.")
-      .version("2.4.1")
       .booleanConf
       .createWithDefault(true)
 
   val STREAMING_METRICS_ENABLED =
     buildConf("spark.sql.streaming.metricsEnabled")
       .doc("Whether Dropwizard/Codahale metrics will be reported for active streaming queries.")
-      .version("2.0.2")
       .booleanConf
       .createWithDefault(false)
 
   val STREAMING_PROGRESS_RETENTION =
     buildConf("spark.sql.streaming.numRecentProgressUpdates")
       .doc("The number of progress updates to retain for a streaming query")
-      .version("2.1.1")
       .intConf
       .createWithDefault(100)
 
@@ -1529,7 +1378,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.checkpointFileManagerClass")
       .doc("The class used to write checkpoint files atomically. This class must be a subclass " +
         "of the interface CheckpointFileManager.")
-      .version("2.4.0")
       .internal()
       .stringConf
 
@@ -1537,7 +1385,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.checkpoint.escapedPathCheck.enabled")
       .doc("Whether to detect a streaming query may pick up an incorrect checkpoint path due " +
         "to SPARK-26824.")
-      .version("3.0.0")
       .internal()
       .booleanConf
       .createWithDefault(true)
@@ -1548,7 +1395,6 @@ object SQLConf {
       .doc("When true, SQL commands use parallel file listing, " +
         "as opposed to single thread listing. " +
         "This usually speeds up commands that need to list many directories.")
-      .version("2.4.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -1558,7 +1404,6 @@ object SQLConf {
       s"which is larger than `${AUTO_BROADCASTJOIN_THRESHOLD.key}` to be more conservative. " +
       "That is to say by default the optimizer will not choose to broadcast a table unless it " +
       "knows for sure its size is small enough.")
-    .version("1.1.0")
     .bytesConf(ByteUnit.BYTE)
     .createWithDefault(Long.MaxValue)
 
@@ -1569,7 +1414,6 @@ object SQLConf {
       "For non-partitioned data source tables, it will be automatically recalculated if table " +
       "statistics are not available. For partitioned data source and partitioned Hive tables, " +
       s"It is '${DEFAULT_SIZE_IN_BYTES.key}' if table statistics are not available.")
-    .version("2.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -1578,7 +1422,6 @@ object SQLConf {
       .internal()
       .doc("The maximum estimation error allowed in HyperLogLog++ algorithm when generating " +
         "column level statistics.")
-      .version("2.1.1")
       .doubleConf
       .createWithDefault(0.05)
 
@@ -1589,7 +1432,6 @@ object SQLConf {
         "histogram. Note that collecting histograms takes extra cost. For example, collecting " +
         "column statistics usually takes only one table scan, but generating equi-height " +
         "histogram will cause an extra table scan.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1597,7 +1439,6 @@ object SQLConf {
     buildConf("spark.sql.statistics.histogram.numBins")
       .internal()
       .doc("The number of bins when generating histograms.")
-      .version("2.3.0")
       .intConf
       .checkValue(num => num > 1, "The number of bins must be greater than 1.")
       .createWithDefault(254)
@@ -1608,7 +1449,6 @@ object SQLConf {
       .doc("Accuracy of percentile approximation when generating equi-height histograms. " +
         "Larger value means better accuracy. The relative error can be deduced by " +
         "1.0 / PERCENTILE_ACCURACY.")
-      .version("2.3.0")
       .intConf
       .createWithDefault(10000)
 
@@ -1617,35 +1457,30 @@ object SQLConf {
       .doc("Enables automatic update for table size once table's data is changed. Note that if " +
         "the total number of files of the table is very large, this can be expensive and slow " +
         "down data change commands.")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
   val CBO_ENABLED =
     buildConf("spark.sql.cbo.enabled")
       .doc("Enables CBO for estimation of plan statistics when set true.")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   val PLAN_STATS_ENABLED =
     buildConf("spark.sql.cbo.planStats.enabled")
       .doc("When true, the logical plan will fetch row counts and column statistics from catalog.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val JOIN_REORDER_ENABLED =
     buildConf("spark.sql.cbo.joinReorder.enabled")
       .doc("Enables join reorder in CBO.")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   val JOIN_REORDER_DP_THRESHOLD =
     buildConf("spark.sql.cbo.joinReorder.dp.threshold")
       .doc("The maximum number of joined nodes allowed in the dynamic programming algorithm.")
-      .version("2.2.0")
       .intConf
       .checkValue(number => number > 0, "The maximum number must be a positive integer.")
       .createWithDefault(12)
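
As a hedged aside, the CBO flags above only help once column statistics have been collected. A sketch, assuming a spark-shell session and an existing table t with columns key and value (all names hypothetical):

    spark.conf.set("spark.sql.cbo.enabled", "true")
    spark.conf.set("spark.sql.cbo.joinReorder.enabled", "true")
    // Collect the column statistics the cost-based optimizer relies on.
    spark.sql("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS key, value")
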
@@ -1655,7 +1490,6 @@ object SQLConf {
       .internal()
       .doc("The weight of cardinality (number of rows) for plan cost comparison in join reorder: " +
         "rows * weight + size * (1 - weight).")
-      .version("2.2.0")
       .doubleConf
       .checkValue(weight => weight >= 0 && weight <= 1, "The weight value must be in [0, 1].")
       .createWithDefault(0.7)
@@ -1663,13 +1497,11 @@ object SQLConf {
   val JOIN_REORDER_DP_STAR_FILTER =
     buildConf("spark.sql.cbo.joinReorder.dp.star.filter")
       .doc("Applies star-join filter heuristics to cost based join enumeration.")
-      .version("2.2.0")
       .booleanConf
       .createWithDefault(false)
 
   val STARSCHEMA_DETECTION = buildConf("spark.sql.cbo.starSchemaDetection")
     .doc("When true, it enables join reordering based on star schema detection. ")
-    .version("2.2.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -1677,7 +1509,6 @@ object SQLConf {
     .internal()
     .doc("Specifies the upper limit of the ratio between the largest fact tables" +
       " for a star join to be considered. ")
-    .version("2.2.0")
     .doubleConf
     .createWithDefault(0.9)
 
@@ -1691,7 +1522,6 @@ object SQLConf {
       "Zone offsets must be in the format '(+|-)HH:mm', for example '-08:00' or '+01:00'. " +
       "Also 'UTC' and 'Z' are supported as aliases of '+00:00'. Other short names are not " +
       "recommended to use because they can be ambiguous.")
-    .version("2.2.0")
     .stringConf
     .checkValue(isValidTimezone, s"Cannot resolve the given timezone with" +
       " ZoneId.of(_, ZoneId.SHORT_IDS)")
@@ -1701,7 +1531,6 @@ object SQLConf {
     buildConf("spark.sql.windowExec.buffer.in.memory.threshold")
       .internal()
       .doc("Threshold for number of rows guaranteed to be held in memory by the window operator")
-      .version("2.2.1")
       .intConf
       .createWithDefault(4096)
 
@@ -1709,7 +1538,6 @@ object SQLConf {
     buildConf("spark.sql.windowExec.buffer.spill.threshold")
       .internal()
       .doc("Threshold for number of rows to be spilled by window operator")
-      .version("2.2.0")
       .intConf
       .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)
 
@@ -1718,7 +1546,6 @@ object SQLConf {
       .internal()
       .doc("Threshold for number of rows guaranteed to be held in memory by the sort merge " +
         "join operator")
-      .version("2.2.1")
       .intConf
       .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
 
@@ -1726,7 +1553,6 @@ object SQLConf {
     buildConf("spark.sql.sortMergeJoinExec.buffer.spill.threshold")
       .internal()
       .doc("Threshold for number of rows to be spilled by sort merge join operator")
-      .version("2.2.0")
       .intConf
       .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)
 
@@ -1735,7 +1561,6 @@ object SQLConf {
       .internal()
       .doc("Threshold for number of rows guaranteed to be held in memory by the cartesian " +
         "product operator")
-      .version("2.2.1")
       .intConf
       .createWithDefault(4096)
 
@@ -1743,14 +1568,12 @@ object SQLConf {
     buildConf("spark.sql.cartesianProductExec.buffer.spill.threshold")
       .internal()
       .doc("Threshold for number of rows to be spilled by cartesian product operator")
-      .version("2.2.0")
       .intConf
       .createWithDefault(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD.defaultValue.get)
 
   val SUPPORT_QUOTED_REGEX_COLUMN_NAME = buildConf("spark.sql.parser.quotedRegexColumnNames")
     .doc("When true, quoted Identifiers (using backticks) in SELECT statement are interpreted" +
       " as regular expressions.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
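
For illustration only, a sketch of the Hive-style regex column behaviour this flag enables, assuming a spark-shell session and a hypothetical table t:

    spark.conf.set("spark.sql.parser.quotedRegexColumnNames", "true")
    // Backticked identifiers are now interpreted as regular expressions over column names,
    // so the Hive idiom below selects every column of t except `id`.
    spark.sql("SELECT `(id)?+.+` FROM t").show()
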
 
@@ -1759,14 +1582,12 @@ object SQLConf {
       .internal()
       .doc("Number of points to sample per partition in order to determine the range boundaries" +
           " for range partitioning, typically used in global sorting (without limit).")
-      .version("2.3.0")
       .intConf
       .createWithDefault(100)
 
   val ARROW_EXECUTION_ENABLED =
     buildConf("spark.sql.execution.arrow.enabled")
       .doc("(Deprecated since Spark 3.0, please set 'spark.sql.execution.arrow.pyspark.enabled'.)")
-      .version("2.3.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1778,7 +1599,6 @@ object SQLConf {
         "2. pyspark.sql.SparkSession.createDataFrame when its input is a Pandas DataFrame " +
         "The following data types are unsupported: " +
         "BinaryType, MapType, ArrayType of TimestampType, and nested StructType.")
-      .version("3.0.0")
       .fallbackConf(ARROW_EXECUTION_ENABLED)
 
   val ARROW_SPARKR_EXECUTION_ENABLED =
@@ -1791,7 +1611,6 @@ object SQLConf {
         "4. gapply " +
         "The following data types are unsupported: " +
         "FloatType, BinaryType, ArrayType, StructType and MapType.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1799,7 +1618,6 @@ object SQLConf {
     buildConf("spark.sql.execution.arrow.fallback.enabled")
       .doc("(Deprecated since Spark 3.0, please set " +
         "'spark.sql.execution.arrow.pyspark.fallback.enabled'.)")
-      .version("2.4.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1807,14 +1625,12 @@ object SQLConf {
     buildConf("spark.sql.execution.arrow.pyspark.fallback.enabled")
       .doc(s"When true, optimizations enabled by '${ARROW_PYSPARK_EXECUTION_ENABLED.key}' will " +
         "fallback automatically to non-optimized implementations if an error occurs.")
-      .version("3.0.0")
       .fallbackConf(ARROW_FALLBACK_ENABLED)
 
   val ARROW_EXECUTION_MAX_RECORDS_PER_BATCH =
     buildConf("spark.sql.execution.arrow.maxRecordsPerBatch")
       .doc("When using Apache Arrow, limit the maximum number of records that can be written " +
         "to a single ArrowRecordBatch in memory. If set to zero or negative there is no limit.")
-      .version("2.3.0")
       .intConf
       .createWithDefault(10000)
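
A hedged sketch of how the Arrow confs above are typically toggled from a session (the batch size is illustrative, not a recommendation), assuming an active SparkSession bound to `spark`:

    spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
    spark.conf.set("spark.sql.execution.arrow.maxRecordsPerBatch", 5000L)
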
 
@@ -1825,7 +1641,6 @@ object SQLConf {
         s"set, the fallback is `${BUFFER_SIZE.key}`. Note that Pandas execution requires more " +
         "than 4 bytes. Lowering this value could make small Pandas UDF batch iterated and " +
         "pipelined; however, it might degrade performance. See SPARK-27870.")
-      .version("3.0.0")
       .fallbackConf(BUFFER_SIZE)
 
   val PANDAS_GROUPED_MAP_ASSIGN_COLUMNS_BY_NAME =
@@ -1835,7 +1650,6 @@ object SQLConf {
         "to use position if not. When false, a grouped map Pandas UDF will assign columns from " +
         "the returned Pandas DataFrame based on position, regardless of column label type. " +
         "This configuration will be deprecated in future releases.")
-      .version("2.4.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -1846,7 +1660,6 @@ object SQLConf {
         "Pandas.Series to Arrow array during serialization. Arrow will raise errors " +
         "when detecting unsafe type conversion like overflow. When false, disabling Arrow's type " +
         "check and do type conversions anyway. This config only works for Arrow 0.11.0+.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1860,7 +1673,6 @@ object SQLConf {
       " column names of the left node are distinct. If all the conditions are met, the" +
       " rule will replace the except operation with a Filter by flipping the filter" +
       " condition(s) of the right node.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -1871,7 +1683,6 @@ object SQLConf {
         "happens according to Hive behavior and SQL ANSI 2011 specification, ie. rounding the " +
         "decimal part of the result if an exact representation is not possible. Otherwise, NULL " +
         "is returned in those cases, as previously.")
-      .version("2.3.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -1881,7 +1692,6 @@ object SQLConf {
       .doc("When integral literal is used in decimal operations, pick a minimum precision " +
         "required by the literal if this config is true, to make the resulting precision and/or " +
         "scale smaller. This can reduce the possibility of precision lose and/or overflow.")
-      .version("2.3.3")
       .booleanConf
       .createWithDefault(true)
 
@@ -1891,7 +1701,6 @@ object SQLConf {
         "information. The values of options whose names that match this regex will be redacted " +
         "in the explain output. This redaction is applied on top of the global redaction " +
         s"configuration defined by ${SECRET_REDACTION_PATTERN.key}.")
-    .version("2.2.2")
     .regexConf
     .createWithDefault("(?i)url".r)
 
@@ -1901,20 +1710,17 @@ object SQLConf {
         "information. When this regex matches a string part, that string part is replaced by a " +
         "dummy value. This is currently used to redact the output of SQL explain commands. " +
         "When this conf is not set, the value from `spark.redaction.string.regex` is used.")
-      .version("2.3.0")
       .fallbackConf(org.apache.spark.internal.config.STRING_REDACTION_PATTERN)
 
   val CONCAT_BINARY_AS_STRING = buildConf("spark.sql.function.concatBinaryAsString")
     .doc("When this option is set to false and all inputs are binary, `functions.concat` returns " +
       "an output as binary. Otherwise, it returns as a string.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
   val ELT_OUTPUT_AS_STRING = buildConf("spark.sql.function.eltOutputAsString")
     .doc("When this option is set to false and all inputs are binary, `elt` returns " +
       "an output as binary. Otherwise, it returns as a string.")
-    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -1925,7 +1731,6 @@ object SQLConf {
         "user-specified schema. If the validation fails, a runtime exception is thrown. " +
         "When this option is set to false, the partition column value will be converted to null " +
         "if it can not be casted to corresponding user-specified schema.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1933,7 +1738,6 @@ object SQLConf {
     buildConf("spark.sql.streaming.continuous.epochBacklogQueueSize")
       .doc("The max number of entries to be stored in queue to wait for late epochs. " +
         "If this parameter is exceeded by the size of the queue, stream will stop with an error.")
-      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
@@ -1942,7 +1746,6 @@ object SQLConf {
     .internal()
     .doc("The size (measured in number of rows) of the queue used in continuous execution to" +
       " buffer the results of a ContinuousDataReader.")
-    .version("2.3.0")
     .intConf
     .createWithDefault(1024)
 
@@ -1951,7 +1754,6 @@ object SQLConf {
       .internal()
       .doc("The interval at which continuous execution readers will poll to check whether" +
         " the epoch has advanced on the driver.")
-      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(100)
 
@@ -1960,14 +1762,12 @@ object SQLConf {
     .doc("A comma-separated list of data source short names or fully qualified data source " +
       "implementation class names for which Data Source V2 code path is disabled. These data " +
       "sources will fallback to Data Source V1 code path.")
-    .version("3.0.0")
     .stringConf
     .createWithDefault("avro,csv,json,kafka,orc,parquet,text")
 
   val DISABLED_V2_STREAMING_WRITERS = buildConf("spark.sql.streaming.disabledV2Writers")
     .doc("A comma-separated list of fully qualified data source register class names for which" +
       " StreamWriteSupport is disabled. Writes to these sources will fall back to the V1 Sinks.")
-    .version("2.3.1")
     .stringConf
     .createWithDefault("")
 
@@ -1978,7 +1778,6 @@ object SQLConf {
         "A comma-separated list of fully qualified data source register class names for which " +
           "MicroBatchReadSupport is disabled. Reads from these sources will fall back to the " +
           "V1 Sources.")
-      .version("2.4.0")
       .stringConf
       .createWithDefault("")
 
@@ -1999,7 +1798,6 @@ object SQLConf {
         "(which takes precedence over this setting), e.g. " +
         "dataframe.write.option(\"partitionOverwriteMode\", \"dynamic\").save(path)."
       )
-      .version("2.3.0")
       .stringConf
       .transform(_.toUpperCase(Locale.ROOT))
       .checkValues(PartitionOverwriteMode.values.map(_.toString))
@@ -2024,7 +1822,6 @@ object SQLConf {
         "in type coercion, e.g. converting `double` to `int` or `decimal` to `double` is " +
         "not allowed."
       )
-      .version("3.0.0")
       .stringConf
       .transform(_.toUpperCase(Locale.ROOT))
       .checkValues(StoreAssignmentPolicy.values.map(_.toString))
@@ -2035,7 +1832,6 @@ object SQLConf {
       "throw a runtime exception if an overflow occurs in any operation on integral/decimal " +
       "field. 2. Spark will forbid using the reserved keywords of ANSI SQL as identifiers in " +
       "the SQL parser.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
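
For context, the overflow behaviour described in the ANSI doc string above can be observed directly. A minimal spark-shell sketch:

    spark.conf.set("spark.sql.ansi.enabled", "true")
    // With ANSI mode on, integral overflow raises an error instead of wrapping around.
    spark.sql("SELECT CAST(2147483647 AS INT) + 1").show()
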
 
@@ -2048,7 +1844,6 @@ object SQLConf {
         "issues. Turn on this config to insert a local sort before actually doing repartition " +
         "to generate consistent repartition results. The performance of repartition() may go " +
         "down since we insert extra local sort before it.")
-        .version("2.1.4")
         .booleanConf
         .createWithDefault(true)
 
@@ -2059,7 +1854,6 @@ object SQLConf {
         "satisfying a query. This optimization allows columnar file format readers to avoid " +
         "reading unnecessary nested column data. Currently Parquet and ORC are the " +
         "data sources that implement this optimization.")
-      .version("2.4.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -2070,7 +1864,6 @@ object SQLConf {
         "containing `dots` to data sources. Currently, Parquet implements both optimizations " +
         "while ORC only supports predicates for names containing `dots`. The other data sources" +
         "don't support this feature yet.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2080,7 +1873,6 @@ object SQLConf {
       .doc("Prune nested fields from object serialization operator which are unnecessary in " +
         "satisfying a query. This optimization allows object serializers to avoid " +
         "executing unnecessary nested expressions.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2091,7 +1883,6 @@ object SQLConf {
         "satisfying a query. Note that this optimization doesn't prune nested fields from " +
         "physical data source scanning. For pruning nested fields from scanning, please use " +
         "`spark.sql.optimizer.nestedSchemaPruning.enabled` config.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2101,7 +1892,6 @@ object SQLConf {
       .doc("In SQL queries with a SORT followed by a LIMIT like " +
           "'SELECT x FROM t ORDER BY y LIMIT m', if m is under this threshold, do a top-K sort" +
           " in memory, otherwise do a global sort which spills to disk if necessary.")
-      .version("2.4.0")
       .intConf
       .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
 
@@ -2117,7 +1907,6 @@ object SQLConf {
     .internal()
     .doc("If it is set to true, column names of the requested schema are passed to CSV parser. " +
       "Other column values can be ignored during parsing even if they are malformed.")
-    .version("2.4.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -2128,7 +1917,6 @@ object SQLConf {
       "Jupyter, the HTML table (generated by _repr_html_) will be returned. For plain Python " +
       "REPL, the returned outputs are formatted like dataframe.show(). In SparkR, the returned " +
       "outputs are showed similar to R data.frame would.")
-    .version("2.4.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2137,14 +1925,12 @@ object SQLConf {
       s"effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true. The valid range of this " +
       "config is from 0 to (Int.MaxValue - 1), so the invalid config like negative and " +
       "greater than (Int.MaxValue - 1) will be normalized to 0 and (Int.MaxValue - 1).")
-    .version("2.4.0")
     .intConf
     .createWithDefault(20)
 
   val REPL_EAGER_EVAL_TRUNCATE = buildConf("spark.sql.repl.eagerEval.truncate")
     .doc("The max number of characters for each cell that is returned by eager evaluation. " +
       s"This only takes effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true.")
-    .version("2.4.0")
     .intConf
     .createWithDefault(20)
 
@@ -2155,7 +1941,6 @@ object SQLConf {
         "by the fast hash aggregate product operator. The bit is not for actual value, " +
         "but the actual numBuckets is determined by loadFactor " +
         "(e.g: default bit value 16 , the actual numBuckets is ((1 << 16) / 0.5).")
-      .version("2.4.0")
       .intConf
       .checkValue(bit => bit >= 10 && bit <= 30, "The bit value must be in [10, 30].")
       .createWithDefault(16)
@@ -2163,7 +1948,6 @@ object SQLConf {
   val AVRO_COMPRESSION_CODEC = buildConf("spark.sql.avro.compression.codec")
     .doc("Compression codec used in writing of AVRO files. Supported codecs: " +
       "uncompressed, deflate, snappy, bzip2 and xz. Default codec is snappy.")
-    .version("2.4.0")
     .stringConf
     .checkValues(Set("uncompressed", "deflate", "snappy", "bzip2", "xz"))
     .createWithDefault("snappy")
@@ -2172,7 +1956,6 @@ object SQLConf {
     .doc("Compression level for the deflate codec used in writing of AVRO files. " +
       "Valid value must be in the range of from 1 to 9 inclusive or -1. " +
       "The default value is -1 which corresponds to 6 level in the current implementation.")
-    .version("2.4.0")
     .intConf
     .checkValues((1 to 9).toSet + Deflater.DEFAULT_COMPRESSION)
     .createWithDefault(Deflater.DEFAULT_COMPRESSION)
@@ -2181,7 +1964,6 @@ object SQLConf {
     .internal()
     .doc(s"If it is set to false, or ${ANSI_ENABLED.key} is true, then size of null returns " +
       "null. Otherwise, it returns -1, which was inherited from Hive.")
-    .version("2.4.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -2190,7 +1972,6 @@ object SQLConf {
       .internal()
       .doc("If it is set to true, the data source provider com.databricks.spark.avro is mapped " +
         "to the built-in but external Avro data source module for backward compatibility.")
-      .version("2.4.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2201,7 +1982,6 @@ object SQLConf {
         "set operations are performed from left to right as they appear in the query. When set " +
         "to false and order of evaluation is not specified by parentheses, INTERSECT operations " +
         "are performed before any UNION, EXCEPT and MINUS operations.")
-      .version("2.4.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2210,7 +1990,6 @@ object SQLConf {
       .internal()
       .doc("When set to true, a literal with an exponent (e.g. 1E-30) would be parsed " +
         "as Decimal rather than Double.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2220,7 +1999,6 @@ object SQLConf {
       .doc("When set to true, negative scale of Decimal type is allowed. For example, " +
         "the type of number 1E10BD under legacy mode is DecimalType(2, -9), but is " +
         "Decimal(11, 0) in non legacy mode.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2229,7 +2007,6 @@ object SQLConf {
       .internal()
       .doc("When set to true, CREATE TABLE syntax without a provider will use hive " +
         s"instead of the value of ${DEFAULT_DATA_SOURCE_NAME.key}.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2238,16 +2015,14 @@ object SQLConf {
       .internal()
       .doc("When true, the bucketed table scan will list files during planning to figure out the " +
         "output ordering, which is expensive and may make the planning quite slow.")
-      .version("3.0.0")
-      .booleanConf
-      .createWithDefault(false)
+    .booleanConf
+    .createWithDefault(false)
 
   val LEGACY_HAVING_WITHOUT_GROUP_BY_AS_WHERE =
     buildConf("spark.sql.legacy.parser.havingWithoutGroupByAsWhere")
       .internal()
       .doc("If it is set to true, the parser will treat HAVING without GROUP BY as a normal " +
         "WHERE, which does not follow SQL standard.")
-      .version("2.4.1")
       .booleanConf
       .createWithDefault(false)
 
@@ -2255,8 +2030,7 @@ object SQLConf {
     buildConf("spark.sql.legacy.json.allowEmptyString.enabled")
       .internal()
       .doc("When set to true, the parser of JSON data source treats empty strings as null for " +
-        "some data types such as `IntegerType`.")
-      .version("3.0.0")
+      "some data types such as `IntegerType`.")
       .booleanConf
       .createWithDefault(false)
 
@@ -2266,7 +2040,6 @@ object SQLConf {
       .doc("When set to true, Spark returns an empty collection with `StringType` as element " +
         "type if the `array`/`map` function is called without any parameters. Otherwise, Spark " +
         "returns an empty collection with `NullType` as element type.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2275,7 +2048,6 @@ object SQLConf {
       .internal()
       .doc("When set to true, user is allowed to use org.apache.spark.sql.functions." +
         "udf(f: AnyRef, dataType: DataType). Otherwise, an exception will be thrown at runtime.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2284,7 +2056,6 @@ object SQLConf {
       .internal()
       .doc("When set to true, TRUNCATE TABLE command will not try to set back original " +
         "permission and ACLs when re-creating the table/partition paths.")
-      .version("2.4.6")
       .booleanConf
       .createWithDefault(false)
 
@@ -2294,7 +2065,6 @@ object SQLConf {
       .doc("When set to true, the key attribute resulted from running `Dataset.groupByKey` " +
         "for non-struct key type, will be named as `value`, following the behavior of Spark " +
         "version 2.4 and earlier.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2302,7 +2072,6 @@ object SQLConf {
     .doc("Maximum number of fields of sequence-like entries can be converted to strings " +
       "in debug output. Any elements beyond the limit will be dropped and replaced by a" +
       """ "... N more fields" placeholder.""")
-    .version("3.0.0")
     .intConf
     .createWithDefault(25)
 
@@ -2311,7 +2080,6 @@ object SQLConf {
       "longer, further output will be truncated.  The default setting always generates a full " +
       "plan.  Set this to a lower value such as 8k if plan strings are taking up too much " +
       "memory or are causing OutOfMemory errors in the driver or UI processes.")
-    .version("3.0.0")
     .bytesConf(ByteUnit.BYTE)
     .checkValue(i => i >= 0 && i <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH, "Invalid " +
       "value for 'spark.sql.maxPlanStringLength'.  Length must be a valid string length " +
@@ -2323,7 +2091,6 @@ object SQLConf {
       .internal()
       .doc("If it is set to true, SET command will fail when the key is registered as " +
         "a SparkConf entry.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2332,7 +2099,6 @@ object SQLConf {
       "java.time.LocalDate classes of Java 8 API are used as external types for " +
       "Catalyst's TimestampType and DateType. If it is set to false, java.sql.Timestamp " +
       "and java.sql.Date are used for the same purpose.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2340,7 +2106,6 @@ object SQLConf {
     .doc("The max length of a file that can be read by the binary file data source. " +
       "Spark will fail fast and not attempt to read the file if its length exceeds this value. " +
       "The theoretical max is Int.MaxValue, though VMs might implement a smaller max.")
-    .version("3.0.0")
     .internal()
     .intConf
     .createWithDefault(Int.MaxValue)
@@ -2350,14 +2115,12 @@ object SQLConf {
       .internal()
       .doc("If it is set to true, date/timestamp will cast to string in binary comparisons " +
         "with String")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val DEFAULT_CATALOG = buildConf("spark.sql.defaultCatalog")
     .doc("Name of the default catalog. This will be the current catalog if users have not " +
       "explicitly set the current catalog yet.")
-    .version("3.0.0")
     .stringConf
     .createWithDefault(SESSION_CATALOG_NAME)
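
For illustration only: the default catalog is normally paired with a catalog registered under the spark.sql.catalog.<name> pattern; the plugin class below is hypothetical:

    spark.conf.set("spark.sql.catalog.my_cat", "com.example.MyCatalogPlugin")   // hypothetical plugin class
    spark.conf.set("spark.sql.defaultCatalog", "my_cat")
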
 
@@ -2369,7 +2132,6 @@ object SQLConf {
         s"be loaded by the $SESSION_CATALOG_NAME, this catalog must also return the table " +
         s"metadata. To delegate operations to the $SESSION_CATALOG_NAME, implementations can " +
         "extend 'CatalogExtension'.")
-      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -2382,7 +2144,6 @@ object SQLConf {
       "MapFromEntries, StringToMap, MapConcat and TransformKeys. When EXCEPTION, the query " +
       "fails if duplicated map keys are detected. When LAST_WIN, the map key that is inserted " +
       "at last takes precedence.")
-    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(MapKeyDedupPolicy.values.map(_.toString))
@@ -2391,7 +2152,6 @@ object SQLConf {
   val LEGACY_LOOSE_UPCAST = buildConf("spark.sql.legacy.doLooseUpcast")
     .internal()
     .doc("When true, the upcast will be loose and allows string to atomic types.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2405,7 +2165,6 @@ object SQLConf {
       "CORRECTED, inner CTE definitions take precedence. The default value is EXCEPTION, " +
       "AnalysisException is thrown while name conflict is detected in nested CTE. This config " +
       "will be removed in future versions and CORRECTED will be the only behavior.")
-    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(LegacyBehaviorPolicy.values.map(_.toString))
@@ -2418,7 +2177,6 @@ object SQLConf {
       "When set to CORRECTED, classes from java.time.* packages are used for the same purpose. " +
       "The default value is EXCEPTION, RuntimeException is thrown when we will get different " +
       "results.")
-    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(LegacyBehaviorPolicy.values.map(_.toString))
@@ -2428,7 +2186,6 @@ object SQLConf {
     buildConf("spark.sql.legacy.followThreeValuedLogicInArrayExists")
       .internal()
       .doc("When true, the ArrayExists will follow the three-valued boolean logic.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2437,7 +2194,6 @@ object SQLConf {
       .doc("A comma-delimited string config of the optional additional remote Maven mirror " +
         "repositories. This is only used for downloading Hive jars in IsolatedClientLoader " +
         "if the default Maven Central repo is unreachable.")
-      .version("3.0.0")
       .stringConf
       .createWithDefault(
         "https://maven-central.storage-download.googleapis.com/maven2/")
@@ -2450,7 +2206,6 @@ object SQLConf {
         "all interval units out of the specified range. If it is set to `false`, " +
         "`ParseException` is thrown if the input does not match to the pattern " +
         "defined by `from` and `to`.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2460,7 +2215,6 @@ object SQLConf {
       .doc("When true, all database and table properties are not reserved and available for " +
         "create/alter syntaxes. But please be aware that the reserved properties will be " +
         "silently removed.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2469,7 +2223,6 @@ object SQLConf {
       .internal()
       .doc("When true, only a single file can be added using ADD FILE. If false, then users " +
         "can add directory by passing directory path to ADD FILE.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2477,13 +2230,11 @@ object SQLConf {
     buildConf("spark.sql.legacy.mssqlserver.numericMapping.enabled")
       .internal()
       .doc("When true, use legacy MySqlServer SMALLINT and REAL type mapping.")
-      .version("2.4.5")
       .booleanConf
       .createWithDefault(false)
 
   val CSV_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.csv.filterPushdown.enabled")
     .doc("When true, enable filter pushdown to CSV datasource.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(true)
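
A minimal sketch (hypothetical path and schema) of a read where the CSV filter pushdown flag applies, assuming an active SparkSession bound to `spark`:

    spark.conf.set("spark.sql.csv.filterPushdown.enabled", "true")
    spark.read
      .schema("id INT, name STRING")
      .csv("/tmp/example.csv")
      .where("id > 10")
      .show()
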
 
@@ -2493,7 +2244,6 @@ object SQLConf {
       .doc("The number of partitions to be handled in one turn when use " +
         "`AlterTableAddPartitionCommand` to add partitions into table. The smaller " +
         "batch size is, the less memory is required for the real handler, e.g. Hive Metastore.")
-      .version("3.0.0")
       .intConf
       .checkValue(_ > 0, "The value of spark.sql.addPartitionInBatch.size must be positive")
       .createWithDefault(100)
@@ -2501,7 +2251,6 @@ object SQLConf {
   val LEGACY_ALLOW_HASH_ON_MAPTYPE = buildConf("spark.sql.legacy.allowHashOnMapType")
     .doc("When set to true, hash expressions can be applied on elements of MapType. Otherwise, " +
       "an analysis exception will be thrown.")
-    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2514,7 +2263,6 @@ object SQLConf {
         "a local date/timestamp in the source calendar, interpreting the resulted date/" +
         "timestamp in the target calendar, and getting the number of micros/millis/days " +
         "since the epoch 1970-01-01 00:00:00Z.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2527,7 +2275,6 @@ object SQLConf {
         "a local date/timestamp in the source calendar, interpreting the resulted date/" +
         "timestamp in the target calendar, and getting the number of micros/millis/days " +
         "since the epoch 1970-01-01 00:00:00Z.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2540,7 +2287,6 @@ object SQLConf {
         "a local date/timestamp in the source calendar, interpreting the resulted date/" +
         "timestamp in the target calendar, and getting the number of micros/millis/days " +
         "since the epoch 1970-01-01 00:00:00Z.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2553,7 +2299,6 @@ object SQLConf {
         "a local date/timestamp in the source calendar, interpreting the resulted date/" +
         "timestamp in the target calendar, and getting the number of micros/millis/days " +
         "since the epoch 1970-01-01 00:00:00Z.")
-      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org