Posted to commits@spark.apache.org by li...@apache.org on 2017/09/07 23:26:58 UTC

spark git commit: [SPARK-13656][SQL] Delete spark.sql.parquet.cacheMetadata from SQLConf and docs

Repository: spark
Updated Branches:
  refs/heads/master b9ab791a9 -> e00f1a1da


[SPARK-13656][SQL] Delete spark.sql.parquet.cacheMetadata from SQLConf and docs

## What changes were proposed in this pull request?

Since [SPARK-15639](https://github.com/apache/spark/pull/13701), `spark.sql.parquet.cacheMetadata` and `PARQUET_CACHE_METADATA` have been unused. This PR removes them from SQLConf and the docs.
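From a user's point of view, the key simply becomes unregistered: setting it is a harmless no-op, since nothing in the Parquet read path consults it anymore. A minimal sketch of that, assuming a local `SparkSession` (the object name and master setting here are illustrative, not part of the patch):

```scala
import org.apache.spark.sql.SparkSession

// Minimal sketch: the key is still accepted as a free-form string conf,
// but since SPARK-15639 nothing reads it, so this set call has no effect.
object CacheMetadataNoOp {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      .appName("cacheMetadata-noop")
      .getOrCreate()

    spark.conf.set("spark.sql.parquet.cacheMetadata", "true")  // no effect

    spark.stop()
  }
}
```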

## How was this patch tested?

Pass the existing Jenkins tests.

Author: Dongjoon Hyun <do...@apache.org>

Closes #19129 from dongjoon-hyun/SPARK-13656.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e00f1a1d
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e00f1a1d
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e00f1a1d

Branch: refs/heads/master
Commit: e00f1a1da12be4a1fdb7b89eb5e098aa16c5c2c3
Parents: b9ab791
Author: Dongjoon Hyun <do...@apache.org>
Authored: Thu Sep 7 16:26:56 2017 -0700
Committer: gatorsmile <ga...@gmail.com>
Committed: Thu Sep 7 16:26:56 2017 -0700

----------------------------------------------------------------------
 docs/sql-programming-guide.md                             | 10 +++-------
 .../scala/org/apache/spark/sql/internal/SQLConf.scala     |  7 -------
 2 files changed, 3 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/e00f1a1d/docs/sql-programming-guide.md
----------------------------------------------------------------------
diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 032073b..45ba4d1 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -926,13 +926,6 @@ Configuration of Parquet can be done using the `setConf` method on `SparkSession
   </td>
 </tr>
 <tr>
-  <td><code>spark.sql.parquet.cacheMetadata</code></td>
-  <td>true</td>
-  <td>
-    Turns on caching of Parquet schema metadata. Can speed up querying of static data.
-  </td>
-</tr>
-<tr>
   <td><code>spark.sql.parquet.compression.codec</code></td>
   <td>snappy</td>
   <td>
@@ -1588,6 +1581,9 @@ options.
       Note that this is different from the Hive behavior.
     - As a result, `DROP TABLE` statements on those tables will not remove the data.
 
+ - `spark.sql.parquet.cacheMetadata` is no longer used.
+   See [SPARK-13664](https://issues.apache.org/jira/browse/SPARK-13664) for details.
+
 ## Upgrading From Spark SQL 1.5 to 1.6
 
  - From Spark 1.6, by default the Thrift server runs in multi-session mode. Which means each JDBC/ODBC
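The surrounding table in the guide keeps the Parquet options that are still live, such as `spark.sql.parquet.compression.codec` above. A minimal sketch of setting one of them at runtime in spark-shell, where `spark` is predefined (the codec choice and output path are illustrative only):

```scala
// Configure a surviving Parquet option for subsequent writes; "gzip" is
// one of the documented values (uncompressed, snappy, gzip, lzo).
spark.conf.set("spark.sql.parquet.compression.codec", "gzip")

// Hypothetical output path, purely for illustration.
spark.range(10).write.parquet("/tmp/parquet-codec-example")
```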

http://git-wip-us.apache.org/repos/asf/spark/blob/e00f1a1d/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index db5d65c..d00c672 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -284,11 +284,6 @@ object SQLConf {
     .booleanConf
     .createWithDefault(false)
 
-  val PARQUET_CACHE_METADATA = buildConf("spark.sql.parquet.cacheMetadata")
-    .doc("Turns on caching of Parquet schema metadata. Can speed up querying of static data.")
-    .booleanConf
-    .createWithDefault(true)
-
   val PARQUET_COMPRESSION = buildConf("spark.sql.parquet.compression.codec")
     .doc("Sets the compression codec use when writing Parquet files. Acceptable values include: " +
       "uncompressed, snappy, gzip, lzo.")
@@ -1010,8 +1005,6 @@ class SQLConf extends Serializable with Logging {
 
   def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
 
-  def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
-
   def parquetVectorizedReaderEnabled: Boolean = getConf(PARQUET_VECTORIZED_READER_ENABLED)
 
   def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)
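For readers unfamiliar with SQLConf internals, each entry pairs a `buildConf` declaration with a typed getter, which is exactly the shape of the two blocks removed above. A sketch of that pattern with a hypothetical key (not a real Spark configuration):

```scala
// Hypothetical entry, shown only to illustrate the declare/accessor pair
// that this commit deletes for spark.sql.parquet.cacheMetadata.
val SOME_HYPOTHETICAL_FLAG = buildConf("spark.sql.hypothetical.someFlag")
  .doc("Illustrative only; not a real Spark configuration.")
  .booleanConf
  .createWithDefault(false)

// ...and in the SQLConf class, the matching typed getter:
def someHypotheticalFlag: Boolean = getConf(SOME_HYPOTHETICAL_FLAG)
```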

