Posted to commits@spark.apache.org by gu...@apache.org on 2020/04/07 12:42:35 UTC

[spark] 04/04: [SPARK-30841][SQL][DOC][FOLLOW-UP] Add version information to the configuration of SQL

This is an automated email from the ASF dual-hosted git repository.

gurwls223 pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git

commit cae889c50fec42fc23c7618b7db39602776945f5
Author: beliefer <be...@163.com>
AuthorDate: Sun Mar 8 12:33:02 2020 +0900

    [SPARK-30841][SQL][DOC][FOLLOW-UP] Add version information to the configuration of SQL
    
    This PR follows https://github.com/apache/spark/pull/27691, https://github.com/apache/spark/pull/27730 and https://github.com/apache/spark/pull/27770.
    I sorted out the information shown in the table below; a short illustrative sketch of the version() builder pattern follows the table.
    
    Item name | Since version | JIRA ID | Commit ID | Note
    -- | -- | -- | -- | --
    spark.sql.redaction.options.regex | 2.2.2 | SPARK-23850 | 6a55d8b03053e616dcacb79cd2c29a06d219dc32#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.redaction.string.regex | 2.3.0 | SPARK-22791 | 28315714ddef3ddcc192375e98dd5207cf4ecc98#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.function.concatBinaryAsString | 2.3.0 | SPARK-22771 | f2b3525c17d660cf6f082bbafea8632615b4f58e#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.function.eltOutputAsString | 2.3.0 | SPARK-22937 | bf853018cabcd3b3abf84bfe534d2981020b4a71#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.sources.validatePartitionColumns | 3.0.0 | SPARK-26263 | 5a140b7844936cf2b65f08853b8cfd8c499d4f13#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.streaming.continuous.epochBacklogQueueSize | 3.0.0 | SPARK-24063 | c4bbfd177b4e7cb46f47b39df9fd71d2d9a12c6d#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.streaming.continuous.executorQueueSize | 2.3.0 | SPARK-22789 | 8941a4abcada873c26af924e129173dc33d66d71#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.streaming.continuous.executorPollIntervalMs | 2.3.0 | SPARK-22789 | 8941a4abcada873c26af924e129173dc33d66d71#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.sources.useV1SourceList | 3.0.0 | SPARK-28747 | cb06209fc908bac6ce6a8f20653865489773cbc3#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.streaming.disabledV2Writers | 2.3.1 | SPARK-23196 | 588b9694c1967ff45774431441e84081ee6eb515#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.streaming.disabledV2MicroBatchReaders | 2.4.0 | SPARK-23362 | 0a73aa31f41c83503d5d99eff3c9d7b406014ab3#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.sources.partitionOverwriteMode | 2.3.0 | SPARK-20236 | b96248862589bae1ddcdb14ce4c802789a001306#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.storeAssignmentPolicy | 3.0.0 | SPARK-28730 | 895c90b582cc2b2667241f66d5b733852aeef9eb#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.ansi.enabled | 3.0.0 | SPARK-30125 | d9b30694122f8716d3acb448638ef1e2b96ebc7a#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.execution.sortBeforeRepartition | 2.1.4 | SPARK-23207 and SPARK-22905 and SPARK-24564 and SPARK-25114 | 4d2d3d47e00e78893b1ecd5a9a9070adc5243ac9#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.optimizer.nestedSchemaPruning.enabled | 2.4.1 | SPARK-4502 | dfcff38394929970fee454c69864d0e10d59f8d4#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.optimizer.serializer.nestedSchemaPruning.enabled | 3.0.0 | SPARK-26837 | 0f2c0b53e8fb18c86c67b5dd679c006db93f94a5#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.optimizer.expression.nestedPruning.enabled | 3.0.0 | SPARK-27707 | 127bc899ae78d73332a87f0972b5db3c9936c1f1#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.execution.topKSortFallbackThreshold | 2.4.0 | SPARK-24193 | 8a837bf4f3f2758f7825d2362cf9de209026651a#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.csv.parser.columnPruning.enabled | 2.4.0 | SPARK-24244 and SPARK-24368 | 64fad0b519cf35b8c0a0dec18dd3df9488a5ed25#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.repl.eagerEval.enabled | 2.4.0 | SPARK-24215 | 6a0b77a55d53e74ac0a0892556c3a7a933474948#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.repl.eagerEval.maxNumRows | 2.4.0 | SPARK-24215 | 6a0b77a55d53e74ac0a0892556c3a7a933474948#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.repl.eagerEval.truncate | 2.4.0 | SPARK-24215 | 6a0b77a55d53e74ac0a0892556c3a7a933474948#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.codegen.aggregate.fastHashMap.capacityBit | 2.4.0 | SPARK-24978 | 6193a202aab0271b4532ee4b740318290f2c44a1#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.avro.compression.codec | 2.4.0 | SPARK-24881 | 0a0f68bae6c0a1bf30184b1e9ac6bf3805bd7511#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.avro.deflate.level | 2.4.0 | SPARK-24881 | 0a0f68bae6c0a1bf30184b1e9ac6bf3805bd7511#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.sizeOfNull | 2.4.0 | SPARK-24605 | d08f53dc61f662f5291f71bcbe1a7b9f531a34d2#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.replaceDatabricksSparkAvro.enabled | 2.4.0 | SPARK-25129 | ac0174e55af2e935d41545721e9f430c942b3a0c#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.setopsPrecedence.enabled | 2.4.0 | SPARK-24966 | 73dd6cf9b558f9d752e1f3c13584344257ad7863#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.exponentLiteralAsDecimal.enabled | 3.0.0 | SPARK-29956 | 87ebfaf003fcd05a7f6d23b3ecd4661409ce5f2f#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.allowNegativeScaleOfDecimal | 3.0.0 | SPARK-30812 | b76bc0b1b8b2abd00a84f805af90ca4c5925faaa#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.createHiveTableByDefault.enabled | 3.0.0 | SPARK-30098 | 58be82ad4b98fc17e821e916e69e77a6aa36209d#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.integralDivide.returnBigint | 3.0.0 | SPARK-25457 | 47d6e80a2e64823fabb596503fb6a6cc6f51f713#diff-9a6b543db706f1a90f790783d6930a13 | Exists in branch-3.0 branch, but the pom.xml file corresponding to the commit log is 2.5.0-SNAPSHOT
    spark.sql.legacy.bucketedTableScan.outputOrdering | 3.0.0 | SPARK-28595 | 469423f33887a966aaa33eb75f5e7974a0a97beb#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.parser.havingWithoutGroupByAsWhere | 2.4.1 | SPARK-25708 | 3dba5d41f1a66ae5eb08404d103284110c45a351#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.json.allowEmptyString.enabled | 3.0.0 | SPARK-25040 | d3de7568f32e298442f07b0a28b2c906de72c797#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.createEmptyCollectionUsingStringType | 3.0.0 | SPARK-30790 | 8ab6ae3ede96adb093347470a5cbbf17fe8c04e9#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.allowUntypedScalaUDF | 3.0.0 | SPARK-26580 | bc30a07ce262840c99a752db4fbd3a423f652017#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.truncateTable.ignorePermissionAcl.enabled | 2.4.6 | SPARK-30312 | 830a4ec59b86253f18eb7dfd6ed0bbe0d7920e5b#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.legacy.dataset.nameNonStructGroupingKeyAsValue | 3.0.0 | SPARK-26085 | ab2eafb3cdc7631452650c6cac03a92629255347#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.debug.maxToStringFields | 3.0.0 | SPARK-26066 | 81550b38e43fb20f89f529d2127575c71a54a538#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.maxPlanStringLength | 3.0.0 | SPARK-26103 | 812ad5546148d2194ab0e4230ee85b8f6a5be2fb#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.setCommandRejectsSparkCoreConfs | 3.0.0 | SPARK-26060 | 1ab3d3e474ce2e36d58aea8ad09fb61f0c73e5c5#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.datetime.java8API.enabled | 3.0.0 | SPARK-27008 | 52671d631d2a64ed1cfa0c6e01168908faf92df8#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.sources.binaryFile.maxLength | 3.0.0 | SPARK-27588 | 618d6bff71073c8c93501ab7392c3cc579730f0b#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.typeCoercion.datetimeToString.enabled | 3.0.0 | SPARK-27638 | 83d289eef492de8c7f3e5145f9bd75431608b500#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.defaultCatalog | 3.0.0 | SPARK-29753 | 942753a44beeae5f0142ceefa307e90cbc1234c5#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.catalog.$SESSION_CATALOG_NAME | 3.0.0 | SPARK-29412 | 9407fba0375675d6ee6461253f3b8230e8d67509#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.legacy.doLooseUpcast | 3.0.0 | SPARK-30812 | b76bc0b1b8b2abd00a84f805af90ca4c5925faaa#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.ctePrecedencePolicy | 3.0.0 | SPARK-30829 | 00943be81afbca6be13e1e72b24536cd98a788d6#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.timeParserPolicy | 3.1.0 | SPARK-30668 | 7db0af578585ecaeee9fd23f8189292289b52a97#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.followThreeValuedLogicInArrayExists | 3.0.0 | SPARK-30812 | b76bc0b1b8b2abd00a84f805af90ca4c5925faaa#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.maven.additionalRemoteRepositories | 3.0.0 | SPARK-29175 | 3d7359ad4202067b26a199657b6a3e1f38be0e4d#diff-9a6b543db706f1a90f790783d6930a13 |
    spark.sql.legacy.fromDayTimeString.enabled | 3.0.0 | SPARK-29864 and SPARK-29920 | e933539cdd557297daf97ff5e532a3f098896979#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.notReserveProperties | 3.0.0 | SPARK-30812 | b76bc0b1b8b2abd00a84f805af90ca4c5925faaa#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.addSingleFileInAddFile | 3.0.0 | SPARK-30234 | 8a8d1fbb10af6da481f26831cd519ef46ccbce6c#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.mssqlserver.numericMapping.enabled | 2.4.5 | SPARK-28152 | 69de7f31c37a7e0298e66cc814afc1b0aa948bbb#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.csv.filterPushdown.enabled | 3.0.0 | SPARK-30323 | 4e50f0291f032b4a5c0b46ed01fdef14e4cbb050#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.addPartitionInBatch.size | 3.0.0 | SPARK-29938 | 5ccbb38a71890b114c707279e7395d1f6284ebfd#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.timeParser.enabled | 3.0.0 | SPARK-30668 | 92f57237871400ab9d499e1174af22a867c01988#diff-9a6b543db706f1a90f790783d6930a13 |  
    spark.sql.legacy.allowDuplicatedMapKeys | 3.0.0 | SPARK-25829 | 33329caa81827a245b84158b13234b88a4746e56#diff-9a6b543db706f1a90f790783d6930a13 |  
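    
    Below is a minimal, self-contained Scala sketch of the builder pattern these entries rely on.
    It is not Spark's real ConfigBuilder (which lives in org.apache.spark.internal.config); it only
    imitates the chained doc/version/booleanConf/createWithDefault calls visible in the diff, to
    show how each entry records the version in which it was introduced.
    
        // Hypothetical stand-in for Spark's ConfigBuilder; names mirror the diff below.
        case class ConfEntry[T](key: String, version: String, default: T, doc: String)
    
        class ConfBuilder(key: String) {
          private var ver = ""
          private var docStr = ""
    
          def doc(d: String): ConfBuilder = { docStr = d; this }
          def version(v: String): ConfBuilder = { ver = v; this }   // e.g. "3.0.0"
          def booleanConf: Typed[Boolean] = new Typed[Boolean]
    
          class Typed[T] {
            def createWithDefault(default: T): ConfEntry[T] =
              ConfEntry(key, ver, default, docStr)
          }
        }
    
        object Demo extends App {
          // Mirrors the shape of an entry touched by this commit, e.g. spark.sql.ansi.enabled.
          val ansiEnabled = new ConfBuilder("spark.sql.ansi.enabled")
            .doc("When true, Spark tries to conform to the ANSI SQL specification.")
            .version("3.0.0")
            .booleanConf
            .createWithDefault(false)
    
          // Prints: spark.sql.ansi.enabled since 3.0.0
          println(s"${ansiEnabled.key} since ${ansiEnabled.version}")
        }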
    
    This supplements the SQL configuration entries with version information.
    
    No user-facing change.
    
    Tested by the existing unit tests.
    
    Closes #27829 from beliefer/add-version-to-sql-config-part-four.
    
    Authored-by: beliefer <be...@163.com>
    Signed-off-by: HyukjinKwon <gu...@apache.org>
---
 .../org/apache/spark/sql/internal/SQLConf.scala    | 103 +++++++++++----------
 1 file changed, 54 insertions(+), 49 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 378e900..420102b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -449,6 +449,7 @@ object SQLConf {
         s"${SHUFFLE_PARTITIONS.key}. This configuration only has an effect when " +
         s"'${ADAPTIVE_EXECUTION_ENABLED.key}' and '${COALESCE_PARTITIONS_ENABLED.key}' " +
         "are both true.")
+      .version("3.0.0")
       .intConf
       .checkValue(_ > 0, "The initial number of partitions must be positive.")
       .createOptional
@@ -1890,6 +1891,7 @@ object SQLConf {
         "information. The values of options whose names that match this regex will be redacted " +
         "in the explain output. This redaction is applied on top of the global redaction " +
         s"configuration defined by ${SECRET_REDACTION_PATTERN.key}.")
+    .version("2.2.2")
     .regexConf
     .createWithDefault("(?i)url".r)
 
@@ -1899,20 +1901,20 @@ object SQLConf {
         "information. When this regex matches a string part, that string part is replaced by a " +
         "dummy value. This is currently used to redact the output of SQL explain commands. " +
         "When this conf is not set, the value from `spark.redaction.string.regex` is used.")
-      .version("")
+      .version("2.3.0")
       .fallbackConf(org.apache.spark.internal.config.STRING_REDACTION_PATTERN)
 
   val CONCAT_BINARY_AS_STRING = buildConf("spark.sql.function.concatBinaryAsString")
     .doc("When this option is set to false and all inputs are binary, `functions.concat` returns " +
       "an output as binary. Otherwise, it returns as a string.")
-    .version("")
+    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
   val ELT_OUTPUT_AS_STRING = buildConf("spark.sql.function.eltOutputAsString")
     .doc("When this option is set to false and all inputs are binary, `elt` returns " +
       "an output as binary. Otherwise, it returns as a string.")
-    .version("")
+    .version("2.3.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -1923,7 +1925,7 @@ object SQLConf {
         "user-specified schema. If the validation fails, a runtime exception is thrown. " +
         "When this option is set to false, the partition column value will be converted to null " +
         "if it can not be casted to corresponding user-specified schema.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -1931,25 +1933,25 @@ object SQLConf {
     buildConf("spark.sql.streaming.continuous.epochBacklogQueueSize")
       .doc("The max number of entries to be stored in queue to wait for late epochs. " +
         "If this parameter is exceeded by the size of the queue, stream will stop with an error.")
-      .version("")
+      .version("3.0.0")
       .intConf
       .createWithDefault(10000)
 
   val CONTINUOUS_STREAMING_EXECUTOR_QUEUE_SIZE =
     buildConf("spark.sql.streaming.continuous.executorQueueSize")
-      .internal()
-      .doc("The size (measured in number of rows) of the queue used in continuous execution to" +
-        " buffer the results of a ContinuousDataReader.")
-      .version("")
-      .intConf
-      .createWithDefault(1024)
+    .internal()
+    .doc("The size (measured in number of rows) of the queue used in continuous execution to" +
+      " buffer the results of a ContinuousDataReader.")
+    .version("2.3.0")
+    .intConf
+    .createWithDefault(1024)
 
   val CONTINUOUS_STREAMING_EXECUTOR_POLL_INTERVAL_MS =
     buildConf("spark.sql.streaming.continuous.executorPollIntervalMs")
       .internal()
       .doc("The interval at which continuous execution readers will poll to check whether" +
         " the epoch has advanced on the driver.")
-      .version("")
+      .version("2.3.0")
       .timeConf(TimeUnit.MILLISECONDS)
       .createWithDefault(100)
 
@@ -1958,14 +1960,14 @@ object SQLConf {
     .doc("A comma-separated list of data source short names or fully qualified data source " +
       "implementation class names for which Data Source V2 code path is disabled. These data " +
       "sources will fallback to Data Source V1 code path.")
-    .version("")
+    .version("3.0.0")
     .stringConf
     .createWithDefault("avro,csv,json,kafka,orc,parquet,text")
 
   val DISABLED_V2_STREAMING_WRITERS = buildConf("spark.sql.streaming.disabledV2Writers")
     .doc("A comma-separated list of fully qualified data source register class names for which" +
       " StreamWriteSupport is disabled. Writes to these sources will fall back to the V1 Sinks.")
-    .version("")
+    .version("2.3.1")
     .stringConf
     .createWithDefault("")
 
@@ -1976,7 +1978,7 @@ object SQLConf {
         "A comma-separated list of fully qualified data source register class names for which " +
           "MicroBatchReadSupport is disabled. Reads from these sources will fall back to the " +
           "V1 Sources.")
-      .version("")
+      .version("2.4.0")
       .stringConf
       .createWithDefault("")
 
@@ -1997,7 +1999,7 @@ object SQLConf {
         "(which takes precedence over this setting), e.g. " +
         "dataframe.write.option(\"partitionOverwriteMode\", \"dynamic\").save(path)."
       )
-      .version("")
+      .version("2.3.0")
       .stringConf
       .transform(_.toUpperCase(Locale.ROOT))
       .checkValues(PartitionOverwriteMode.values.map(_.toString))
@@ -2022,7 +2024,7 @@ object SQLConf {
         "in type coercion, e.g. converting `double` to `int` or `decimal` to `double` is " +
         "not allowed."
       )
-      .version("")
+      .version("3.0.0")
       .stringConf
       .transform(_.toUpperCase(Locale.ROOT))
       .checkValues(StoreAssignmentPolicy.values.map(_.toString))
@@ -2033,7 +2035,7 @@ object SQLConf {
       "throw a runtime exception if an overflow occurs in any operation on integral/decimal " +
       "field. 2. Spark will forbid using the reserved keywords of ANSI SQL as identifiers in " +
       "the SQL parser.")
-    .version("")
+    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2046,7 +2048,7 @@ object SQLConf {
         "issues. Turn on this config to insert a local sort before actually doing repartition " +
         "to generate consistent repartition results. The performance of repartition() may go " +
         "down since we insert extra local sort before it.")
-      .version("")
+      .version("2.1.4")
       .booleanConf
       .createWithDefault(true)
 
@@ -2057,7 +2059,7 @@ object SQLConf {
         "satisfying a query. This optimization allows columnar file format readers to avoid " +
         "reading unnecessary nested column data. Currently Parquet and ORC are the " +
         "data sources that implement this optimization.")
-      .version("")
+      .version("2.4.1")
       .booleanConf
       .createWithDefault(true)
 
@@ -2068,6 +2070,7 @@ object SQLConf {
         "containing `dots` to data sources. Currently, Parquet implements both optimizations " +
         "while ORC only supports predicates for names containing `dots`. The other data sources" +
         "don't support this feature yet.")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2077,7 +2080,7 @@ object SQLConf {
       .doc("Prune nested fields from object serialization operator which are unnecessary in " +
         "satisfying a query. This optimization allows object serializers to avoid " +
         "executing unnecessary nested expressions.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2088,7 +2091,7 @@ object SQLConf {
         "satisfying a query. Note that this optimization doesn't prune nested fields from " +
         "physical data source scanning. For pruning nested fields from scanning, please use " +
         "`spark.sql.optimizer.nestedSchemaPruning.enabled` config.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2098,7 +2101,7 @@ object SQLConf {
       .doc("In SQL queries with a SORT followed by a LIMIT like " +
           "'SELECT x FROM t ORDER BY y LIMIT m', if m is under this threshold, do a top-K sort" +
           " in memory, otherwise do a global sort which spills to disk if necessary.")
-      .version("")
+      .version("2.4.0")
       .intConf
       .createWithDefault(ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH)
 
@@ -2114,7 +2117,7 @@ object SQLConf {
     .internal()
     .doc("If it is set to true, column names of the requested schema are passed to CSV parser. " +
       "Other column values can be ignored during parsing even if they are malformed.")
-    .version("")
+    .version("2.4.0")
     .booleanConf
     .createWithDefault(true)
 
@@ -2125,7 +2128,7 @@ object SQLConf {
       "Jupyter, the HTML table (generated by _repr_html_) will be returned. For plain Python " +
       "REPL, the returned outputs are formatted like dataframe.show(). In SparkR, the returned " +
       "outputs are showed similar to R data.frame would.")
-    .version("")
+    .version("2.4.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2134,14 +2137,14 @@ object SQLConf {
       s"effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true. The valid range of this " +
       "config is from 0 to (Int.MaxValue - 1), so the invalid config like negative and " +
       "greater than (Int.MaxValue - 1) will be normalized to 0 and (Int.MaxValue - 1).")
-    .version("")
+    .version("2.4.0")
     .intConf
     .createWithDefault(20)
 
   val REPL_EAGER_EVAL_TRUNCATE = buildConf("spark.sql.repl.eagerEval.truncate")
     .doc("The max number of characters for each cell that is returned by eager evaluation. " +
       s"This only takes effect when ${REPL_EAGER_EVAL_ENABLED.key} is set to true.")
-    .version("")
+    .version("2.4.0")
     .intConf
     .createWithDefault(20)
 
@@ -2152,7 +2155,7 @@ object SQLConf {
         "by the fast hash aggregate product operator. The bit is not for actual value, " +
         "but the actual numBuckets is determined by loadFactor " +
         "(e.g: default bit value 16 , the actual numBuckets is ((1 << 16) / 0.5).")
-      .version("")
+      .version("2.4.0")
       .intConf
       .checkValue(bit => bit >= 10 && bit <= 30, "The bit value must be in [10, 30].")
       .createWithDefault(16)
@@ -2160,7 +2163,7 @@ object SQLConf {
   val AVRO_COMPRESSION_CODEC = buildConf("spark.sql.avro.compression.codec")
     .doc("Compression codec used in writing of AVRO files. Supported codecs: " +
       "uncompressed, deflate, snappy, bzip2 and xz. Default codec is snappy.")
-    .version("")
+    .version("2.4.0")
     .stringConf
     .checkValues(Set("uncompressed", "deflate", "snappy", "bzip2", "xz"))
     .createWithDefault("snappy")
@@ -2198,7 +2201,7 @@ object SQLConf {
         "set operations are performed from left to right as they appear in the query. When set " +
         "to false and order of evaluation is not specified by parentheses, INTERSECT operations " +
         "are performed before any UNION, EXCEPT and MINUS operations.")
-      .version("")
+      .version("2.4.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2207,7 +2210,7 @@ object SQLConf {
       .internal()
       .doc("When set to true, a literal with an exponent (e.g. 1E-30) would be parsed " +
         "as Decimal rather than Double.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2253,7 +2256,7 @@ object SQLConf {
       .internal()
       .doc("When set to true, the parser of JSON data source treats empty strings as null for " +
         "some data types such as `IntegerType`.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2291,7 +2294,7 @@ object SQLConf {
       .doc("When set to true, the key attribute resulted from running `Dataset.groupByKey` " +
         "for non-struct key type, will be named as `value`, following the behavior of Spark " +
         "version 2.4 and earlier.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2299,7 +2302,7 @@ object SQLConf {
     .doc("Maximum number of fields of sequence-like entries can be converted to strings " +
       "in debug output. Any elements beyond the limit will be dropped and replaced by a" +
       """ "... N more fields" placeholder.""")
-    .version("")
+    .version("3.0.0")
     .intConf
     .createWithDefault(25)
 
@@ -2308,7 +2311,7 @@ object SQLConf {
       "longer, further output will be truncated.  The default setting always generates a full " +
       "plan.  Set this to a lower value such as 8k if plan strings are taking up too much " +
       "memory or are causing OutOfMemory errors in the driver or UI processes.")
-    .version("")
+    .version("3.0.0")
     .bytesConf(ByteUnit.BYTE)
     .checkValue(i => i >= 0 && i <= ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH, "Invalid " +
       "value for 'spark.sql.maxPlanStringLength'.  Length must be a valid string length " +
@@ -2320,7 +2323,7 @@ object SQLConf {
       .internal()
       .doc("If it is set to true, SET command will fail when the key is registered as " +
         "a SparkConf entry.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2329,7 +2332,7 @@ object SQLConf {
       "java.time.LocalDate classes of Java 8 API are used as external types for " +
       "Catalyst's TimestampType and DateType. If it is set to false, java.sql.Timestamp " +
       "and java.sql.Date are used for the same purpose.")
-    .version("")
+    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2337,7 +2340,7 @@ object SQLConf {
     .doc("The max length of a file that can be read by the binary file data source. " +
       "Spark will fail fast and not attempt to read the file if its length exceeds this value. " +
       "The theoretical max is Int.MaxValue, though VMs might implement a smaller max.")
-    .version("")
+    .version("3.0.0")
     .internal()
     .intConf
     .createWithDefault(Int.MaxValue)
@@ -2347,14 +2350,14 @@ object SQLConf {
       .internal()
       .doc("If it is set to true, date/timestamp will cast to string in binary comparisons " +
         "with String")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
   val DEFAULT_CATALOG = buildConf("spark.sql.defaultCatalog")
     .doc("Name of the default catalog. This will be the current catalog if users have not " +
       "explicitly set the current catalog yet.")
-    .version("")
+    .version("3.0.0")
     .stringConf
     .createWithDefault(SESSION_CATALOG_NAME)
 
@@ -2366,7 +2369,7 @@ object SQLConf {
         s"be loaded by the $SESSION_CATALOG_NAME, this catalog must also return the table " +
         s"metadata. To delegate operations to the $SESSION_CATALOG_NAME, implementations can " +
         "extend 'CatalogExtension'.")
-      .version("")
+      .version("3.0.0")
       .stringConf
       .createOptional
 
@@ -2379,6 +2382,7 @@ object SQLConf {
       "MapFromEntries, StringToMap, MapConcat and TransformKeys. When EXCEPTION, the query " +
       "fails if duplicated map keys are detected. When LAST_WIN, the map key that is inserted " +
       "at last takes precedence.")
+    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(MapKeyDedupPolicy.values.map(_.toString))
@@ -2387,7 +2391,7 @@ object SQLConf {
   val LEGACY_LOOSE_UPCAST = buildConf("spark.sql.legacy.doLooseUpcast")
     .internal()
     .doc("When true, the upcast will be loose and allows string to atomic types.")
-    .version("")
+    .version("3.0.0")
     .booleanConf
     .createWithDefault(false)
 
@@ -2401,7 +2405,7 @@ object SQLConf {
       "CORRECTED, inner CTE definitions take precedence. The default value is EXCEPTION, " +
       "AnalysisException is thrown while name conflict is detected in nested CTE. This config " +
       "will be removed in future versions and CORRECTED will be the only behavior.")
-    .version("")
+    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(LegacyBehaviorPolicy.values.map(_.toString))
@@ -2414,6 +2418,7 @@ object SQLConf {
       "When set to CORRECTED, classes from java.time.* packages are used for the same purpose. " +
       "The default value is EXCEPTION, RuntimeException is thrown when we will get different " +
       "results.")
+    .version("3.0.0")
     .stringConf
     .transform(_.toUpperCase(Locale.ROOT))
     .checkValues(LegacyBehaviorPolicy.values.map(_.toString))
@@ -2423,7 +2428,7 @@ object SQLConf {
     buildConf("spark.sql.legacy.followThreeValuedLogicInArrayExists")
       .internal()
       .doc("When true, the ArrayExists will follow the three-valued boolean logic.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(true)
 
@@ -2432,7 +2437,7 @@ object SQLConf {
       .doc("A comma-delimited string config of the optional additional remote Maven mirror " +
         "repositories. This is only used for downloading Hive jars in IsolatedClientLoader " +
         "if the default Maven Central repo is unreachable.")
-      .version("")
+      .version("3.0.0")
       .stringConf
       .createWithDefault(
         "https://maven-central.storage-download.googleapis.com/maven2/")
@@ -2445,7 +2450,7 @@ object SQLConf {
         "all interval units out of the specified range. If it is set to `false`, " +
         "`ParseException` is thrown if the input does not match to the pattern " +
         "defined by `from` and `to`.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2455,7 +2460,7 @@ object SQLConf {
       .doc("When true, all database and table properties are not reserved and available for " +
         "create/alter syntaxes. But please be aware that the reserved properties will be " +
         "silently removed.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2464,7 +2469,7 @@ object SQLConf {
       .internal()
       .doc("When true, only a single file can be added using ADD FILE. If false, then users " +
         "can add directory by passing directory path to ADD FILE.")
-      .version("")
+      .version("3.0.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -2472,7 +2477,7 @@ object SQLConf {
     buildConf("spark.sql.legacy.mssqlserver.numericMapping.enabled")
       .internal()
       .doc("When true, use legacy MySqlServer SMALLINT and REAL type mapping.")
-      .version("")
+      .version("2.4.5")
       .booleanConf
       .createWithDefault(false)
 

