Posted to commits@spark.apache.org by li...@apache.org on 2016/02/25 10:50:00 UTC
[3/3] spark git commit: [SPARK-13486][SQL] Move SQLConf into an internal package
[SPARK-13486][SQL] Move SQLConf into an internal package
## What changes were proposed in this pull request?
This patch moves SQLConf into the org.apache.spark.sql.internal package to make it very explicit that it is internal. Soon I will also submit more API work that creates implementations of interfaces in this internal package.
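For downstream code, the move reduces to a one-line import change, repeated across the dozens of files in the diff below; a before/after sketch:

```scala
// Before this patch:
// import org.apache.spark.sql.SQLConf
// import org.apache.spark.sql.SQLConf.SQLConfEntry

// After this patch (nested types such as SQLConfEntry move along with the class):
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.SQLConfEntry
```

Because moving a class is binary-incompatible for anything compiled against the old name, MiMa excludes for org.apache.spark.sql.SQLConf and its nested types are added in project/MimaExcludes.scala (first hunk below).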
## How was this patch tested?
If it compiles, then the refactoring should work.
Author: Reynold Xin <rx...@databricks.com>
Closes #11363 from rxin/SPARK-13486.
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/2b2c8c33
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/2b2c8c33
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/2b2c8c33
Branch: refs/heads/master
Commit: 2b2c8c33236677c916541f956f7b94bba014a9ce
Parents: 07f92ef
Author: Reynold Xin <rx...@databricks.com>
Authored: Thu Feb 25 17:49:50 2016 +0800
Committer: Cheng Lian <li...@databricks.com>
Committed: Thu Feb 25 17:49:50 2016 +0800
----------------------------------------------------------------------
project/MimaExcludes.scala | 6 +
.../scala/org/apache/spark/sql/DataFrame.scala | 1 +
.../org/apache/spark/sql/GroupedData.scala | 1 +
.../scala/org/apache/spark/sql/SQLConf.scala | 730 -------------------
.../scala/org/apache/spark/sql/SQLContext.scala | 3 +-
.../spark/sql/execution/ExistingRDD.scala | 3 +-
.../spark/sql/execution/SparkStrategies.scala | 3 +-
.../apache/spark/sql/execution/commands.scala | 3 +-
.../InsertIntoHadoopFsRelation.scala | 1 +
.../execution/datasources/SqlNewHadoopRDD.scala | 3 +-
.../execution/datasources/WriterContainer.scala | 1 +
.../parquet/CatalystSchemaConverter.scala | 4 +-
.../parquet/CatalystWriteSupport.scala | 2 +-
.../datasources/parquet/ParquetRelation.scala | 1 +
.../spark/sql/execution/debug/package.scala | 1 +
.../execution/local/BinaryHashJoinNode.scala | 2 +-
.../execution/local/BroadcastHashJoinNode.scala | 2 +-
.../sql/execution/local/ConvertToSafeNode.scala | 2 +-
.../execution/local/ConvertToUnsafeNode.scala | 2 +-
.../spark/sql/execution/local/ExpandNode.scala | 2 +-
.../spark/sql/execution/local/FilterNode.scala | 2 +-
.../sql/execution/local/IntersectNode.scala | 4 +-
.../spark/sql/execution/local/LimitNode.scala | 2 +-
.../spark/sql/execution/local/LocalNode.scala | 3 +-
.../execution/local/NestedLoopJoinNode.scala | 2 +-
.../spark/sql/execution/local/ProjectNode.scala | 2 +-
.../spark/sql/execution/local/SampleNode.scala | 2 +-
.../spark/sql/execution/local/SeqScanNode.scala | 2 +-
.../local/TakeOrderedAndProjectNode.scala | 2 +-
.../spark/sql/execution/local/UnionNode.scala | 2 +-
.../org/apache/spark/sql/internal/SQLConf.scala | 730 +++++++++++++++++++
.../apache/spark/sql/internal/package-info.java | 22 +
.../org/apache/spark/sql/internal/package.scala | 24 +
.../spark/sql/DataFrameAggregateSuite.scala | 1 +
.../apache/spark/sql/DataFramePivotSuite.scala | 1 +
.../org/apache/spark/sql/DataFrameSuite.scala | 1 +
.../scala/org/apache/spark/sql/JoinSuite.scala | 1 +
.../spark/sql/MultiSQLContextsSuite.scala | 1 +
.../scala/org/apache/spark/sql/QueryTest.scala | 1 +
.../apache/spark/sql/SQLConfEntrySuite.scala | 150 ----
.../org/apache/spark/sql/SQLConfSuite.scala | 132 ----
.../org/apache/spark/sql/SQLContextSuite.scala | 1 +
.../org/apache/spark/sql/SQLQuerySuite.scala | 1 +
.../execution/ExchangeCoordinatorSuite.scala | 1 +
.../spark/sql/execution/PlannerSuite.scala | 3 +-
.../columnar/PartitionBatchPruningSuite.scala | 1 +
.../execution/datasources/json/JsonSuite.scala | 1 +
.../parquet/ParquetFilterSuite.scala | 1 +
.../datasources/parquet/ParquetIOSuite.scala | 1 +
.../ParquetPartitionDiscoverySuite.scala | 1 +
.../datasources/parquet/ParquetQuerySuite.scala | 1 +
.../parquet/ParquetReadBenchmark.scala | 3 +-
.../datasources/parquet/ParquetTest.scala | 3 +-
.../sql/execution/joins/InnerJoinSuite.scala | 3 +-
.../sql/execution/joins/OuterJoinSuite.scala | 3 +-
.../sql/execution/joins/SemiJoinSuite.scala | 3 +-
.../spark/sql/execution/local/DummyNode.scala | 2 +-
.../sql/execution/local/HashJoinNodeSuite.scala | 2 +-
.../sql/execution/local/LocalNodeTest.scala | 2 +-
.../local/NestedLoopJoinNodeSuite.scala | 2 +-
.../sql/execution/metric/SQLMetricsSuite.scala | 1 +
.../spark/sql/internal/SQLConfEntrySuite.scala | 150 ++++
.../spark/sql/internal/SQLConfSuite.scala | 133 ++++
.../spark/sql/sources/DataSourceTest.scala | 1 +
.../spark/sql/sources/FilteredScanSuite.scala | 1 +
.../spark/sql/sources/PrunedScanSuite.scala | 1 +
.../spark/sql/sources/SaveLoadSuite.scala | 3 +-
.../apache/spark/sql/test/TestSQLContext.scala | 6 +-
.../hive/thriftserver/HiveThriftServer2.scala | 2 +-
.../SparkExecuteStatementOperation.scala | 3 +-
.../thriftserver/HiveThriftServer2Suites.scala | 2 +-
.../hive/execution/HiveCompatibilitySuite.scala | 2 +-
.../org/apache/spark/sql/hive/HiveContext.scala | 5 +-
.../hive/execution/InsertIntoHiveTable.scala | 2 +-
.../apache/spark/sql/hive/test/TestHive.scala | 3 +-
.../sql/hive/HiveMetastoreCatalogSuite.scala | 3 +-
.../sql/hive/MetastoreDataSourcesSuite.scala | 1 +
.../hive/ParquetHiveCompatibilitySuite.scala | 3 +-
.../spark/sql/hive/QueryPartitionSuite.scala | 1 +
.../apache/spark/sql/hive/StatisticsSuite.scala | 3 +-
.../hive/execution/AggregationQuerySuite.scala | 1 +
.../sql/hive/execution/SQLQuerySuite.scala | 1 +
.../sql/hive/orc/OrcHadoopFsRelationSuite.scala | 3 +-
.../spark/sql/hive/orc/OrcQuerySuite.scala | 1 +
.../apache/spark/sql/hive/parquetSuites.scala | 2 +
.../spark/sql/sources/BucketedReadSuite.scala | 1 +
.../spark/sql/sources/BucketedWriteSuite.scala | 3 +-
.../sources/ParquetHadoopFsRelationSuite.scala | 1 +
.../sql/sources/hadoopFsRelationSuites.scala | 1 +
89 files changed, 1172 insertions(+), 1063 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/project/MimaExcludes.scala
----------------------------------------------------------------------
diff --git a/project/MimaExcludes.scala b/project/MimaExcludes.scala
index b12aefc..14e3c90 100644
--- a/project/MimaExcludes.scala
+++ b/project/MimaExcludes.scala
@@ -275,6 +275,12 @@ object MimaExcludes {
) ++ Seq (
// SPARK-7729 Executor which has been killed should also be displayed on Executor Tab
ProblemFilters.exclude[MissingMethodProblem]("org.apache.spark.status.api.v1.ExecutorSummary.this")
+ ) ++ Seq(
+ // [SPARK-13486][SQL] Move SQLConf into an internal package
+ ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf"),
+ ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$SQLConfEntry"),
+ ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$"),
+ ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.sql.SQLConf$SQLConfEntry$")
)
case v if v.startsWith("1.6") =>
Seq(
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
index f590ac0..abb8fe5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrame.scala
@@ -41,6 +41,7 @@ import org.apache.spark.sql.execution.{ExplainCommand, FileRelation, LogicalRDD,
import org.apache.spark.sql.execution.datasources.{CreateTableUsingAsSelect, LogicalRelation}
import org.apache.spark.sql.execution.datasources.json.JacksonGenerator
import org.apache.spark.sql.execution.python.EvaluatePython
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
index 66ec0e7..f06d161 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/GroupedData.scala
@@ -26,6 +26,7 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Pivot}
import org.apache.spark.sql.catalyst.util.usePrettyExpression
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.NumericType
/**
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
deleted file mode 100644
index a601c87..0000000
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLConf.scala
+++ /dev/null
@@ -1,730 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.spark.sql
-
-import java.util.Properties
-
-import scala.collection.immutable
-import scala.collection.JavaConverters._
-
-import org.apache.parquet.hadoop.ParquetOutputCommitter
-
-import org.apache.spark.Logging
-import org.apache.spark.sql.catalyst.CatalystConf
-import org.apache.spark.sql.catalyst.parser.ParserConf
-import org.apache.spark.util.Utils
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// This file defines the configuration options for Spark SQL.
-////////////////////////////////////////////////////////////////////////////////////////////////////
-
-
-private[spark] object SQLConf {
-
- private val sqlConfEntries = java.util.Collections.synchronizedMap(
- new java.util.HashMap[String, SQLConfEntry[_]]())
-
- /**
- * An entry contains all meta information for a configuration.
- *
- * @param key the key for the configuration
- * @param defaultValue the default value for the configuration
- * @param valueConverter how to convert a string to the value. It should throw an exception if the
- * string does not have the required format.
- * @param stringConverter how to convert a value to a string that the user can use as a valid
- * string value. It's usually `toString`. But sometimes, a custom converter
- * is necessary. E.g., if T is List[String], `a, b, c` is better than
- * `List(a, b, c)`.
- * @param doc the document for the configuration
- * @param isPublic if this configuration is public to the user. If it's `false`, this
- * configuration is only used internally and we should not expose it to the user.
- * @tparam T the value type
- */
- private[sql] class SQLConfEntry[T] private(
- val key: String,
- val defaultValue: Option[T],
- val valueConverter: String => T,
- val stringConverter: T => String,
- val doc: String,
- val isPublic: Boolean) {
-
- def defaultValueString: String = defaultValue.map(stringConverter).getOrElse("<undefined>")
-
- override def toString: String = {
- s"SQLConfEntry(key = $key, defaultValue=$defaultValueString, doc=$doc, isPublic = $isPublic)"
- }
- }
-
- private[sql] object SQLConfEntry {
-
- private def apply[T](
- key: String,
- defaultValue: Option[T],
- valueConverter: String => T,
- stringConverter: T => String,
- doc: String,
- isPublic: Boolean): SQLConfEntry[T] =
- sqlConfEntries.synchronized {
- if (sqlConfEntries.containsKey(key)) {
- throw new IllegalArgumentException(s"Duplicate SQLConfEntry. $key has been registered")
- }
- val entry =
- new SQLConfEntry[T](key, defaultValue, valueConverter, stringConverter, doc, isPublic)
- sqlConfEntries.put(key, entry)
- entry
- }
-
- def intConf(
- key: String,
- defaultValue: Option[Int] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Int] =
- SQLConfEntry(key, defaultValue, { v =>
- try {
- v.toInt
- } catch {
- case _: NumberFormatException =>
- throw new IllegalArgumentException(s"$key should be int, but was $v")
- }
- }, _.toString, doc, isPublic)
-
- def longConf(
- key: String,
- defaultValue: Option[Long] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Long] =
- SQLConfEntry(key, defaultValue, { v =>
- try {
- v.toLong
- } catch {
- case _: NumberFormatException =>
- throw new IllegalArgumentException(s"$key should be long, but was $v")
- }
- }, _.toString, doc, isPublic)
-
- def longMemConf(
- key: String,
- defaultValue: Option[Long] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Long] =
- SQLConfEntry(key, defaultValue, { v =>
- try {
- v.toLong
- } catch {
- case _: NumberFormatException =>
- try {
- Utils.byteStringAsBytes(v)
- } catch {
- case _: NumberFormatException =>
- throw new IllegalArgumentException(s"$key should be long, but was $v")
- }
- }
- }, _.toString, doc, isPublic)
-
- def doubleConf(
- key: String,
- defaultValue: Option[Double] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Double] =
- SQLConfEntry(key, defaultValue, { v =>
- try {
- v.toDouble
- } catch {
- case _: NumberFormatException =>
- throw new IllegalArgumentException(s"$key should be double, but was $v")
- }
- }, _.toString, doc, isPublic)
-
- def booleanConf(
- key: String,
- defaultValue: Option[Boolean] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Boolean] =
- SQLConfEntry(key, defaultValue, { v =>
- try {
- v.toBoolean
- } catch {
- case _: IllegalArgumentException =>
- throw new IllegalArgumentException(s"$key should be boolean, but was $v")
- }
- }, _.toString, doc, isPublic)
-
- def stringConf(
- key: String,
- defaultValue: Option[String] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[String] =
- SQLConfEntry(key, defaultValue, v => v, v => v, doc, isPublic)
-
- def enumConf[T](
- key: String,
- valueConverter: String => T,
- validValues: Set[T],
- defaultValue: Option[T] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[T] =
- SQLConfEntry(key, defaultValue, v => {
- val _v = valueConverter(v)
- if (!validValues.contains(_v)) {
- throw new IllegalArgumentException(
- s"The value of $key should be one of ${validValues.mkString(", ")}, but was $v")
- }
- _v
- }, _.toString, doc, isPublic)
-
- def seqConf[T](
- key: String,
- valueConverter: String => T,
- defaultValue: Option[Seq[T]] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Seq[T]] = {
- SQLConfEntry(
- key, defaultValue, _.split(",").map(valueConverter), _.mkString(","), doc, isPublic)
- }
-
- def stringSeqConf(
- key: String,
- defaultValue: Option[Seq[String]] = None,
- doc: String = "",
- isPublic: Boolean = true): SQLConfEntry[Seq[String]] = {
- seqConf(key, s => s, defaultValue, doc, isPublic)
- }
- }
-
- import SQLConfEntry._
-
- val ALLOW_MULTIPLE_CONTEXTS = booleanConf("spark.sql.allowMultipleContexts",
- defaultValue = Some(true),
- doc = "When set to true, creating multiple SQLContexts/HiveContexts is allowed." +
- "When set to false, only one SQLContext/HiveContext is allowed to be created " +
- "through the constructor (new SQLContexts/HiveContexts created through newSession " +
- "method is allowed). Please note that this conf needs to be set in Spark Conf. Once" +
- "a SQLContext/HiveContext has been created, changing the value of this conf will not" +
- "have effect.",
- isPublic = true)
-
- val COMPRESS_CACHED = booleanConf("spark.sql.inMemoryColumnarStorage.compressed",
- defaultValue = Some(true),
- doc = "When set to true Spark SQL will automatically select a compression codec for each " +
- "column based on statistics of the data.",
- isPublic = false)
-
- val COLUMN_BATCH_SIZE = intConf("spark.sql.inMemoryColumnarStorage.batchSize",
- defaultValue = Some(10000),
- doc = "Controls the size of batches for columnar caching. Larger batch sizes can improve " +
- "memory utilization and compression, but risk OOMs when caching data.",
- isPublic = false)
-
- val IN_MEMORY_PARTITION_PRUNING =
- booleanConf("spark.sql.inMemoryColumnarStorage.partitionPruning",
- defaultValue = Some(true),
- doc = "When true, enable partition pruning for in-memory columnar tables.",
- isPublic = false)
-
- val AUTO_BROADCASTJOIN_THRESHOLD = intConf("spark.sql.autoBroadcastJoinThreshold",
- defaultValue = Some(10 * 1024 * 1024),
- doc = "Configures the maximum size in bytes for a table that will be broadcast to all worker " +
- "nodes when performing a join. By setting this value to -1 broadcasting can be disabled. " +
- "Note that currently statistics are only supported for Hive Metastore tables where the " +
- "command<code>ANALYZE TABLE <tableName> COMPUTE STATISTICS noscan</code> has been run.")
-
- val DEFAULT_SIZE_IN_BYTES = longConf(
- "spark.sql.defaultSizeInBytes",
- doc = "The default table size used in query planning. By default, it is set to a larger " +
- "value than `spark.sql.autoBroadcastJoinThreshold` to be more conservative. That is to say " +
- "by default the optimizer will not choose to broadcast a table unless it knows for sure its" +
- "size is small enough.",
- isPublic = false)
-
- val SHUFFLE_PARTITIONS = intConf("spark.sql.shuffle.partitions",
- defaultValue = Some(200),
- doc = "The default number of partitions to use when shuffling data for joins or aggregations.")
-
- val SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE =
- longMemConf("spark.sql.adaptive.shuffle.targetPostShuffleInputSize",
- defaultValue = Some(64 * 1024 * 1024),
- doc = "The target post-shuffle input size in bytes of a task.")
-
- val ADAPTIVE_EXECUTION_ENABLED = booleanConf("spark.sql.adaptive.enabled",
- defaultValue = Some(false),
- doc = "When true, enable adaptive query execution.")
-
- val SHUFFLE_MIN_NUM_POSTSHUFFLE_PARTITIONS =
- intConf("spark.sql.adaptive.minNumPostShufflePartitions",
- defaultValue = Some(-1),
- doc = "The advisory minimal number of post-shuffle partitions provided to " +
- "ExchangeCoordinator. This setting is used in our test to make sure we " +
- "have enough parallelism to expose issues that will not be exposed with a " +
- "single partition. When the value is a non-positive value, this setting will" +
- "not be provided to ExchangeCoordinator.",
- isPublic = false)
-
- val SUBEXPRESSION_ELIMINATION_ENABLED = booleanConf("spark.sql.subexpressionElimination.enabled",
- defaultValue = Some(true),
- doc = "When true, common subexpressions will be eliminated.",
- isPublic = false)
-
- val CASE_SENSITIVE = booleanConf("spark.sql.caseSensitive",
- defaultValue = Some(true),
- doc = "Whether the query analyzer should be case sensitive or not.")
-
- val PARQUET_SCHEMA_MERGING_ENABLED = booleanConf("spark.sql.parquet.mergeSchema",
- defaultValue = Some(false),
- doc = "When true, the Parquet data source merges schemas collected from all data files, " +
- "otherwise the schema is picked from the summary file or a random data file " +
- "if no summary file is available.")
-
- val PARQUET_SCHEMA_RESPECT_SUMMARIES = booleanConf("spark.sql.parquet.respectSummaryFiles",
- defaultValue = Some(false),
- doc = "When true, we make assumption that all part-files of Parquet are consistent with " +
- "summary files and we will ignore them when merging schema. Otherwise, if this is " +
- "false, which is the default, we will merge all part-files. This should be considered " +
- "as expert-only option, and shouldn't be enabled before knowing what it means exactly.")
-
- val PARQUET_BINARY_AS_STRING = booleanConf("spark.sql.parquet.binaryAsString",
- defaultValue = Some(false),
- doc = "Some other Parquet-producing systems, in particular Impala and older versions of " +
- "Spark SQL, do not differentiate between binary data and strings when writing out the " +
- "Parquet schema. This flag tells Spark SQL to interpret binary data as a string to provide " +
- "compatibility with these systems.")
-
- val PARQUET_INT96_AS_TIMESTAMP = booleanConf("spark.sql.parquet.int96AsTimestamp",
- defaultValue = Some(true),
- doc = "Some Parquet-producing systems, in particular Impala, store Timestamp into INT96. " +
- "Spark would also store Timestamp as INT96 because we need to avoid precision lost of the " +
- "nanoseconds field. This flag tells Spark SQL to interpret INT96 data as a timestamp to " +
- "provide compatibility with these systems.")
-
- val PARQUET_CACHE_METADATA = booleanConf("spark.sql.parquet.cacheMetadata",
- defaultValue = Some(true),
- doc = "Turns on caching of Parquet schema metadata. Can speed up querying of static data.")
-
- val PARQUET_COMPRESSION = enumConf("spark.sql.parquet.compression.codec",
- valueConverter = v => v.toLowerCase,
- validValues = Set("uncompressed", "snappy", "gzip", "lzo"),
- defaultValue = Some("gzip"),
- doc = "Sets the compression codec use when writing Parquet files. Acceptable values include: " +
- "uncompressed, snappy, gzip, lzo.")
-
- val PARQUET_FILTER_PUSHDOWN_ENABLED = booleanConf("spark.sql.parquet.filterPushdown",
- defaultValue = Some(true),
- doc = "Enables Parquet filter push-down optimization when set to true.")
-
- val PARQUET_WRITE_LEGACY_FORMAT = booleanConf(
- key = "spark.sql.parquet.writeLegacyFormat",
- defaultValue = Some(false),
- doc = "Whether to follow Parquet's format specification when converting Parquet schema to " +
- "Spark SQL schema and vice versa.")
-
- val PARQUET_OUTPUT_COMMITTER_CLASS = stringConf(
- key = "spark.sql.parquet.output.committer.class",
- defaultValue = Some(classOf[ParquetOutputCommitter].getName),
- doc = "The output committer class used by Parquet. The specified class needs to be a " +
- "subclass of org.apache.hadoop.mapreduce.OutputCommitter. Typically, it's also a subclass " +
- "of org.apache.parquet.hadoop.ParquetOutputCommitter. NOTE: 1. Instead of SQLConf, this " +
- "option must be set in Hadoop Configuration. 2. This option overrides " +
- "\"spark.sql.sources.outputCommitterClass\".")
-
- val PARQUET_UNSAFE_ROW_RECORD_READER_ENABLED = booleanConf(
- key = "spark.sql.parquet.enableUnsafeRowRecordReader",
- defaultValue = Some(true),
- doc = "Enables using the custom ParquetUnsafeRowRecordReader.")
-
- // Note: this cannot be enabled all the time because the reader will not be returning UnsafeRows.
- // Doing so is very expensive and we should remove this requirement instead of fixing it here.
- // Initial testing seems to indicate only sort requires this.
- val PARQUET_VECTORIZED_READER_ENABLED = booleanConf(
- key = "spark.sql.parquet.enableVectorizedReader",
- defaultValue = Some(false),
- doc = "Enables vectorized parquet decoding.")
-
- val ORC_FILTER_PUSHDOWN_ENABLED = booleanConf("spark.sql.orc.filterPushdown",
- defaultValue = Some(false),
- doc = "When true, enable filter pushdown for ORC files.")
-
- val HIVE_VERIFY_PARTITION_PATH = booleanConf("spark.sql.hive.verifyPartitionPath",
- defaultValue = Some(false),
- doc = "When true, check all the partition paths under the table\'s root directory " +
- "when reading data stored in HDFS.")
-
- val HIVE_METASTORE_PARTITION_PRUNING = booleanConf("spark.sql.hive.metastorePartitionPruning",
- defaultValue = Some(false),
- doc = "When true, some predicates will be pushed down into the Hive metastore so that " +
- "unmatching partitions can be eliminated earlier.")
-
- val NATIVE_VIEW = booleanConf("spark.sql.nativeView",
- defaultValue = Some(false),
- doc = "When true, CREATE VIEW will be handled by Spark SQL instead of Hive native commands. " +
- "Note that this function is experimental and should ony be used when you are using " +
- "non-hive-compatible tables written by Spark SQL. The SQL string used to create " +
- "view should be fully qualified, i.e. use `tbl1`.`col1` instead of `*` whenever " +
- "possible, or you may get wrong result.",
- isPublic = false)
-
- val CANONICAL_NATIVE_VIEW = booleanConf("spark.sql.nativeView.canonical",
- defaultValue = Some(true),
- doc = "When this option and spark.sql.nativeView are both true, Spark SQL tries to handle " +
- "CREATE VIEW statement using SQL query string generated from view definition logical " +
- "plan. If the logical plan doesn't have a SQL representation, we fallback to the " +
- "original native view implementation.",
- isPublic = false)
-
- val COLUMN_NAME_OF_CORRUPT_RECORD = stringConf("spark.sql.columnNameOfCorruptRecord",
- defaultValue = Some("_corrupt_record"),
- doc = "The name of internal column for storing raw/un-parsed JSON records that fail to parse.")
-
- val BROADCAST_TIMEOUT = intConf("spark.sql.broadcastTimeout",
- defaultValue = Some(5 * 60),
- doc = "Timeout in seconds for the broadcast wait time in broadcast joins.")
-
- // This is only used for the thriftserver
- val THRIFTSERVER_POOL = stringConf("spark.sql.thriftserver.scheduler.pool",
- doc = "Set a Fair Scheduler pool for a JDBC client session")
-
- val THRIFTSERVER_UI_STATEMENT_LIMIT = intConf("spark.sql.thriftserver.ui.retainedStatements",
- defaultValue = Some(200),
- doc = "The number of SQL statements kept in the JDBC/ODBC web UI history.")
-
- val THRIFTSERVER_UI_SESSION_LIMIT = intConf("spark.sql.thriftserver.ui.retainedSessions",
- defaultValue = Some(200),
- doc = "The number of SQL client sessions kept in the JDBC/ODBC web UI history.")
-
- // This is used to set the default data source
- val DEFAULT_DATA_SOURCE_NAME = stringConf("spark.sql.sources.default",
- defaultValue = Some("org.apache.spark.sql.parquet"),
- doc = "The default data source to use in input/output.")
-
- // This is used to control when we will split a schema's JSON string into multiple pieces
- // in order to fit the JSON string in the metastore's table property (by default, the value
- // has a length restriction of 4000 characters). We will split the JSON string of a schema
- // if its length exceeds the threshold.
- val SCHEMA_STRING_LENGTH_THRESHOLD = intConf("spark.sql.sources.schemaStringLengthThreshold",
- defaultValue = Some(4000),
- doc = "The maximum length allowed in a single cell when " +
- "storing additional schema information in Hive's metastore.",
- isPublic = false)
-
- val PARTITION_DISCOVERY_ENABLED = booleanConf("spark.sql.sources.partitionDiscovery.enabled",
- defaultValue = Some(true),
- doc = "When true, automatically discover data partitions.")
-
- val PARTITION_COLUMN_TYPE_INFERENCE =
- booleanConf("spark.sql.sources.partitionColumnTypeInference.enabled",
- defaultValue = Some(true),
- doc = "When true, automatically infer the data types for partitioned columns.")
-
- val PARTITION_MAX_FILES =
- intConf("spark.sql.sources.maxConcurrentWrites",
- defaultValue = Some(1),
- doc = "The maximum number of concurrent files to open before falling back on sorting when " +
- "writing out files using dynamic partitioning.")
-
- val BUCKETING_ENABLED = booleanConf("spark.sql.sources.bucketing.enabled",
- defaultValue = Some(true),
- doc = "When false, we will treat bucketed table as normal table")
-
- // The output committer class used by HadoopFsRelation. The specified class needs to be a
- // subclass of org.apache.hadoop.mapreduce.OutputCommitter.
- //
- // NOTE:
- //
- // 1. Instead of SQLConf, this option *must be set in Hadoop Configuration*.
- // 2. This option can be overridden by "spark.sql.parquet.output.committer.class".
- val OUTPUT_COMMITTER_CLASS =
- stringConf("spark.sql.sources.outputCommitterClass", isPublic = false)
-
- val PARALLEL_PARTITION_DISCOVERY_THRESHOLD = intConf(
- key = "spark.sql.sources.parallelPartitionDiscovery.threshold",
- defaultValue = Some(32),
- doc = "The degree of parallelism for schema merging and partition discovery of " +
- "Parquet data sources.")
-
- // Whether to perform eager analysis when constructing a dataframe.
- // Set to false when debugging requires the ability to look at invalid query plans.
- val DATAFRAME_EAGER_ANALYSIS = booleanConf(
- "spark.sql.eagerAnalysis",
- defaultValue = Some(true),
- doc = "When true, eagerly applies query analysis on DataFrame operations.",
- isPublic = false)
-
- // Whether to automatically resolve ambiguity in join conditions for self-joins.
- // See SPARK-6231.
- val DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY = booleanConf(
- "spark.sql.selfJoinAutoResolveAmbiguity",
- defaultValue = Some(true),
- isPublic = false)
-
- // Whether to retain group by columns or not in GroupedData.agg.
- val DATAFRAME_RETAIN_GROUP_COLUMNS = booleanConf(
- "spark.sql.retainGroupColumns",
- defaultValue = Some(true),
- isPublic = false)
-
- val DATAFRAME_PIVOT_MAX_VALUES = intConf(
- "spark.sql.pivotMaxValues",
- defaultValue = Some(10000),
- doc = "When doing a pivot without specifying values for the pivot column this is the maximum " +
- "number of (distinct) values that will be collected without error."
- )
-
- val RUN_SQL_ON_FILES = booleanConf("spark.sql.runSQLOnFiles",
- defaultValue = Some(true),
- isPublic = false,
- doc = "When true, we could use `datasource`.`path` as table in SQL query"
- )
-
- val PARSER_SUPPORT_QUOTEDID = booleanConf("spark.sql.parser.supportQuotedIdentifiers",
- defaultValue = Some(true),
- isPublic = false,
- doc = "Whether to use quoted identifier.\n false: default(past) behavior. Implies only" +
- "alphaNumeric and underscore are valid characters in identifiers.\n" +
- " true: implies column names can contain any character.")
-
- val PARSER_SUPPORT_SQL11_RESERVED_KEYWORDS = booleanConf(
- "spark.sql.parser.supportSQL11ReservedKeywords",
- defaultValue = Some(false),
- isPublic = false,
- doc = "This flag should be set to true to enable support for SQL2011 reserved keywords.")
-
- val WHOLESTAGE_CODEGEN_ENABLED = booleanConf("spark.sql.codegen.wholeStage",
- defaultValue = Some(true),
- doc = "When true, the whole stage (of multiple operators) will be compiled into single java" +
- " method",
- isPublic = false)
-
-
- object Deprecated {
- val MAPRED_REDUCE_TASKS = "mapred.reduce.tasks"
- val EXTERNAL_SORT = "spark.sql.planner.externalSort"
- val USE_SQL_AGGREGATE2 = "spark.sql.useAggregate2"
- val TUNGSTEN_ENABLED = "spark.sql.tungsten.enabled"
- val CODEGEN_ENABLED = "spark.sql.codegen"
- val UNSAFE_ENABLED = "spark.sql.unsafe.enabled"
- val SORTMERGE_JOIN = "spark.sql.planner.sortMergeJoin"
- }
-}
-
-/**
- * A class that enables the setting and getting of mutable config parameters/hints.
- *
- * In the presence of a SQLContext, these can be set and queried by passing SET commands
- * into Spark SQL's query functions (i.e. sql()). Otherwise, users of this class can
- * modify the hints by programmatically calling the setters and getters of this class.
- *
- * SQLConf is thread-safe (internally synchronized, so safe to be used in multiple threads).
- */
-private[sql] class SQLConf extends Serializable with CatalystConf with ParserConf with Logging {
- import SQLConf._
-
- /** Only a low degree of contention is expected for conf, thus NOT using a ConcurrentHashMap. */
- @transient protected[spark] val settings = java.util.Collections.synchronizedMap(
- new java.util.HashMap[String, String]())
-
- /** ************************ Spark SQL Params/Hints ******************* */
-
- private[spark] def useCompression: Boolean = getConf(COMPRESS_CACHED)
-
- private[spark] def parquetCompressionCodec: String = getConf(PARQUET_COMPRESSION)
-
- private[spark] def parquetCacheMetadata: Boolean = getConf(PARQUET_CACHE_METADATA)
-
- private[spark] def columnBatchSize: Int = getConf(COLUMN_BATCH_SIZE)
-
- private[spark] def numShufflePartitions: Int = getConf(SHUFFLE_PARTITIONS)
-
- private[spark] def targetPostShuffleInputSize: Long =
- getConf(SHUFFLE_TARGET_POSTSHUFFLE_INPUT_SIZE)
-
- private[spark] def adaptiveExecutionEnabled: Boolean = getConf(ADAPTIVE_EXECUTION_ENABLED)
-
- private[spark] def minNumPostShufflePartitions: Int =
- getConf(SHUFFLE_MIN_NUM_POSTSHUFFLE_PARTITIONS)
-
- private[spark] def parquetFilterPushDown: Boolean = getConf(PARQUET_FILTER_PUSHDOWN_ENABLED)
-
- private[spark] def orcFilterPushDown: Boolean = getConf(ORC_FILTER_PUSHDOWN_ENABLED)
-
- private[spark] def verifyPartitionPath: Boolean = getConf(HIVE_VERIFY_PARTITION_PATH)
-
- private[spark] def metastorePartitionPruning: Boolean = getConf(HIVE_METASTORE_PARTITION_PRUNING)
-
- private[spark] def nativeView: Boolean = getConf(NATIVE_VIEW)
-
- private[spark] def wholeStageEnabled: Boolean = getConf(WHOLESTAGE_CODEGEN_ENABLED)
-
- private[spark] def canonicalView: Boolean = getConf(CANONICAL_NATIVE_VIEW)
-
- def caseSensitiveAnalysis: Boolean = getConf(SQLConf.CASE_SENSITIVE)
-
- private[spark] def subexpressionEliminationEnabled: Boolean =
- getConf(SUBEXPRESSION_ELIMINATION_ENABLED)
-
- private[spark] def autoBroadcastJoinThreshold: Int = getConf(AUTO_BROADCASTJOIN_THRESHOLD)
-
- private[spark] def defaultSizeInBytes: Long =
- getConf(DEFAULT_SIZE_IN_BYTES, autoBroadcastJoinThreshold + 1L)
-
- private[spark] def isParquetBinaryAsString: Boolean = getConf(PARQUET_BINARY_AS_STRING)
-
- private[spark] def isParquetINT96AsTimestamp: Boolean = getConf(PARQUET_INT96_AS_TIMESTAMP)
-
- private[spark] def writeLegacyParquetFormat: Boolean = getConf(PARQUET_WRITE_LEGACY_FORMAT)
-
- private[spark] def inMemoryPartitionPruning: Boolean = getConf(IN_MEMORY_PARTITION_PRUNING)
-
- private[spark] def columnNameOfCorruptRecord: String = getConf(COLUMN_NAME_OF_CORRUPT_RECORD)
-
- private[spark] def broadcastTimeout: Int = getConf(BROADCAST_TIMEOUT)
-
- private[spark] def defaultDataSourceName: String = getConf(DEFAULT_DATA_SOURCE_NAME)
-
- private[spark] def partitionDiscoveryEnabled(): Boolean =
- getConf(SQLConf.PARTITION_DISCOVERY_ENABLED)
-
- private[spark] def partitionColumnTypeInferenceEnabled(): Boolean =
- getConf(SQLConf.PARTITION_COLUMN_TYPE_INFERENCE)
-
- private[spark] def parallelPartitionDiscoveryThreshold: Int =
- getConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD)
-
- private[spark] def bucketingEnabled(): Boolean = getConf(SQLConf.BUCKETING_ENABLED)
-
- // Do not use a value larger than 4000 as the default value of this property.
- // See the comments of SCHEMA_STRING_LENGTH_THRESHOLD above for more information.
- private[spark] def schemaStringLengthThreshold: Int = getConf(SCHEMA_STRING_LENGTH_THRESHOLD)
-
- private[spark] def dataFrameEagerAnalysis: Boolean = getConf(DATAFRAME_EAGER_ANALYSIS)
-
- private[spark] def dataFrameSelfJoinAutoResolveAmbiguity: Boolean =
- getConf(DATAFRAME_SELF_JOIN_AUTO_RESOLVE_AMBIGUITY)
-
- private[spark] def dataFrameRetainGroupColumns: Boolean = getConf(DATAFRAME_RETAIN_GROUP_COLUMNS)
-
- private[spark] def runSQLOnFile: Boolean = getConf(RUN_SQL_ON_FILES)
-
- def supportQuotedId: Boolean = getConf(PARSER_SUPPORT_QUOTEDID)
-
- def supportSQL11ReservedKeywords: Boolean = getConf(PARSER_SUPPORT_SQL11_RESERVED_KEYWORDS)
-
- /** ********************** SQLConf functionality methods ************ */
-
- /** Set Spark SQL configuration properties. */
- def setConf(props: Properties): Unit = settings.synchronized {
- props.asScala.foreach { case (k, v) => setConfString(k, v) }
- }
-
- /** Set the given Spark SQL configuration property using a `string` value. */
- def setConfString(key: String, value: String): Unit = {
- require(key != null, "key cannot be null")
- require(value != null, s"value cannot be null for key: $key")
- val entry = sqlConfEntries.get(key)
- if (entry != null) {
- // Only verify configs in the SQLConf object
- entry.valueConverter(value)
- }
- setConfWithCheck(key, value)
- }
-
- /** Set the given Spark SQL configuration property. */
- def setConf[T](entry: SQLConfEntry[T], value: T): Unit = {
- require(entry != null, "entry cannot be null")
- require(value != null, s"value cannot be null for key: ${entry.key}")
- require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered")
- setConfWithCheck(entry.key, entry.stringConverter(value))
- }
-
- /** Return the value of Spark SQL configuration property for the given key. */
- def getConfString(key: String): String = {
- Option(settings.get(key)).
- orElse {
- // Try to use the default value
- Option(sqlConfEntries.get(key)).map(_.defaultValueString)
- }.
- getOrElse(throw new NoSuchElementException(key))
- }
-
- /**
- * Return the value of Spark SQL configuration property for the given key. If the key is not set
- * yet, return `defaultValue`. This is useful when `defaultValue` in SQLConfEntry is not the
- * desired one.
- */
- def getConf[T](entry: SQLConfEntry[T], defaultValue: T): T = {
- require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered")
- Option(settings.get(entry.key)).map(entry.valueConverter).getOrElse(defaultValue)
- }
-
- /**
- * Return the value of Spark SQL configuration property for the given key. If the key is not set
- * yet, return `defaultValue` in [[SQLConfEntry]].
- */
- def getConf[T](entry: SQLConfEntry[T]): T = {
- require(sqlConfEntries.get(entry.key) == entry, s"$entry is not registered")
- Option(settings.get(entry.key)).map(entry.valueConverter).orElse(entry.defaultValue).
- getOrElse(throw new NoSuchElementException(entry.key))
- }
-
- /**
- * Return the `string` value of Spark SQL configuration property for the given key. If the key is
- * not set yet, return `defaultValue`.
- */
- def getConfString(key: String, defaultValue: String): String = {
- val entry = sqlConfEntries.get(key)
- if (entry != null && defaultValue != "<undefined>") {
- // Only verify configs in the SQLConf object
- entry.valueConverter(defaultValue)
- }
- Option(settings.get(key)).getOrElse(defaultValue)
- }
-
- /**
- * Return all the configuration properties that have been set (i.e. not the default).
- * This creates a new copy of the config properties in the form of a Map.
- */
- def getAllConfs: immutable.Map[String, String] =
- settings.synchronized { settings.asScala.toMap }
-
- /**
- * Return all the configuration definitions that have been defined in [[SQLConf]]. Each
- * definition contains key, defaultValue and doc.
- */
- def getAllDefinedConfs: Seq[(String, String, String)] = sqlConfEntries.synchronized {
- sqlConfEntries.values.asScala.filter(_.isPublic).map { entry =>
- (entry.key, entry.defaultValueString, entry.doc)
- }.toSeq
- }
-
- private def setConfWithCheck(key: String, value: String): Unit = {
- if (key.startsWith("spark.") && !key.startsWith("spark.sql.")) {
- logWarning(s"Attempt to set non-Spark SQL config in SQLConf: key = $key, value = $value")
- }
- settings.put(key, value)
- }
-
- private[spark] def unsetConf(key: String): Unit = {
- settings.remove(key)
- }
-
- private[spark] def unsetConf(entry: SQLConfEntry[_]): Unit = {
- settings.remove(entry.key)
- }
-
- private[spark] def clear(): Unit = {
- settings.clear()
- }
-}
-
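The 730 lines deleted above reappear unchanged as sql/core/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala (see the diffstat). For reference, a minimal usage sketch of the API this file defines, under its new package; the entry name and key below are hypothetical, and because SQLConfEntry and its factory methods are private[sql], the sketch assumes it compiles inside the org.apache.spark.sql package tree:

```scala
package org.apache.spark.sql.internal

object SQLConfUsageSketch {
  import SQLConf.SQLConfEntry

  // Hypothetical entry following the booleanConf pattern above; registering a
  // duplicate key throws IllegalArgumentException.
  val DEMO_FLAG: SQLConfEntry[Boolean] = SQLConfEntry.booleanConf(
    "spark.sql.demo.flag",                     // hypothetical key, not a real Spark option
    defaultValue = Some(false),
    doc = "Illustrative flag for this sketch.")

  def demo(): Unit = {
    val conf = new SQLConf                     // internally synchronized, safe across threads
    conf.setConf(DEMO_FLAG, true)              // typed setter; requires the entry is registered
    assert(conf.getConf(DEMO_FLAG))            // typed getter; falls back to defaultValue
    conf.setConfString(DEMO_FLAG.key, "false") // string setter; validated by valueConverter
    assert(!conf.getConf(DEMO_FLAG))
  }
}
```

Note that booleanConf registers the entry in the process-wide sqlConfEntries map, so a sketch like this can only run once per JVM with a given key.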
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index a2f3868..1c24d9e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -31,7 +31,6 @@ import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
import org.apache.spark.sql.{execution => sparkexecution}
-import org.apache.spark.sql.SQLConf.SQLConfEntry
import org.apache.spark.sql.catalyst.{InternalRow, _}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders.encoderFor
@@ -43,6 +42,8 @@ import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.exchange.EnsureRequirements
import org.apache.spark.sql.execution.ui.{SQLListener, SQLTab}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.internal.SQLConf.SQLConfEntry
import org.apache.spark.sql.sources.BaseRelation
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.ExecutionListenerManager
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
index 8649d2d..2cbe3f2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/ExistingRDD.scala
@@ -18,7 +18,7 @@
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{AnalysisException, Row, SQLConf, SQLContext}
+import org.apache.spark.sql.{AnalysisException, Row, SQLContext}
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.expressions._
@@ -27,6 +27,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Statistics}
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, UnknownPartitioning}
import org.apache.spark.sql.execution.datasources.parquet.ParquetRelation
import org.apache.spark.sql.execution.metric.SQLMetrics
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.{BaseRelation, HadoopFsRelation}
import org.apache.spark.sql.types.DataType
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
index 247eb05..5fdf38c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
@@ -31,6 +31,7 @@ import org.apache.spark.sql.execution.{DescribeCommand => RunnableDescribeComman
import org.apache.spark.sql.execution.columnar.{InMemoryColumnarTableScan, InMemoryRelation}
import org.apache.spark.sql.execution.datasources.{CreateTableUsing, CreateTempTableUsing, DescribeCommand => LogicalDescribeCommand, _}
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight}
+import org.apache.spark.sql.internal.SQLConf
private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
self: SparkPlanner =>
@@ -98,7 +99,7 @@ private[sql] abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
* Join implementations are chosen with the following precedence:
*
* - Broadcast: if one side of the join has an estimated physical size that is smaller than the
- * user-configurable [[org.apache.spark.sql.SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold
+ * user-configurable [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold
* or if that side has an explicit broadcast hint (e.g. the user applied the
* [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame), then that side
* of the join will be broadcasted and the other side will be streamed, with no shuffling
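As context for the scaladoc reference above: the threshold is spark.sql.autoBroadcastJoinThreshold, defined in the moved SQLConf, and its doc notes that -1 disables broadcasting entirely. A hedged sketch of toggling it through the public API:

```scala
import org.apache.spark.sql.SQLContext

// Sketch: SQLContext.setConf(String, String) routes through SQLConf.setConfString,
// which validates the value with the entry's registered converter.
def disableBroadcastJoins(sqlContext: SQLContext): Unit =
  sqlContext.setConf("spark.sql.autoBroadcastJoinThreshold", "-1")
```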
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
index c6adb58..5574645 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/commands.scala
@@ -21,12 +21,13 @@ import java.util.NoSuchElementException
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
-import org.apache.spark.sql.{DataFrame, Row, SQLConf, SQLContext}
+import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.errors.TreeNodeException
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala
index 2d3e171..c8b020d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/InsertIntoHadoopFsRelation.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.{RunnableCommand, SQLExecution}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.util.Utils
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
index 2591133..f4271d1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SqlNewHadoopRDD.scala
@@ -32,7 +32,8 @@ import org.apache.spark.{Partition => SparkPartition, _}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.executor.DataReadMethod
-import org.apache.spark.sql.{SQLConf, SQLContext}
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.datasources.parquet.UnsafeRowParquetRecordReader
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.{SerializableConfiguration, ShutdownHookManager}
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala
index 7e5c8f2..c3db2a0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/WriterContainer.scala
@@ -31,6 +31,7 @@ import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.UnsafeKVExternalSorter
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources.{HadoopFsRelation, OutputWriter, OutputWriterFactory}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.util.SerializableConfiguration
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
index 54dda0c..ab4250d 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala
@@ -25,8 +25,9 @@ import org.apache.parquet.schema.OriginalType._
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName._
import org.apache.parquet.schema.Type.Repetition._
-import org.apache.spark.sql.{AnalysisException, SQLConf}
+import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.execution.datasources.parquet.CatalystSchemaConverter.{maxPrecisionForBytes, MAX_PRECISION_FOR_INT32, MAX_PRECISION_FOR_INT64}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
@@ -37,7 +38,6 @@ import org.apache.spark.sql.types._
* [[MessageType]] schemas.
*
* @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md
- *
* @constructor
* @param assumeBinaryIsString Whether unannotated BINARY fields should be assumed to be Spark SQL
* [[StringType]] fields when converting Parquet a [[MessageType]] to Spark SQL
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystWriteSupport.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystWriteSupport.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystWriteSupport.scala
index e78afa5..3508220 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystWriteSupport.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystWriteSupport.scala
@@ -30,11 +30,11 @@ import org.apache.parquet.hadoop.api.WriteSupport.WriteContext
import org.apache.parquet.io.api.{Binary, RecordConsumer}
import org.apache.spark.Logging
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.SpecializedGetters
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.parquet.CatalystSchemaConverter.{minBytesForPrecision, MAX_PRECISION_FOR_INT32, MAX_PRECISION_FOR_INT64}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
index 1e686d4..184cbb2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRelation.scala
@@ -47,6 +47,7 @@ import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.LegacyTypeStringParser
import org.apache.spark.sql.execution.datasources.{PartitionSpec, _}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types.{DataType, StructType}
import org.apache.spark.util.{SerializableConfiguration, Utils}
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
index dbb6b65..95d033b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/debug/package.scala
@@ -25,6 +25,7 @@ import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
+import org.apache.spark.sql.internal.SQLConf
/**
* Contains methods for debugging query execution.
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BinaryHashJoinNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BinaryHashJoinNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BinaryHashJoinNode.scala
index 5934504..8f063e2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BinaryHashJoinNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BinaryHashJoinNode.scala
@@ -17,10 +17,10 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide, HashedRelation}
+import org.apache.spark.sql.internal.SQLConf
/**
* A [[HashJoinNode]] that builds the [[HashedRelation]] according to the value of
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BroadcastHashJoinNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BroadcastHashJoinNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BroadcastHashJoinNode.scala
index cd1c865..9ffa272 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BroadcastHashJoinNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/BroadcastHashJoinNode.scala
@@ -18,10 +18,10 @@
package org.apache.spark.sql.execution.local
import org.apache.spark.broadcast.Broadcast
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide, HashedRelation}
+import org.apache.spark.sql.internal.SQLConf
/**
* A [[HashJoinNode]] for broadcast join. It takes a streamedNode and a broadcast
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToSafeNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToSafeNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToSafeNode.scala
index b31c5a8..f79d795 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToSafeNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToSafeNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, FromUnsafeProjection, Projection}
+import org.apache.spark.sql.internal.SQLConf
case class ConvertToSafeNode(conf: SQLConf, child: LocalNode) extends UnaryLocalNode(conf) {
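The FromUnsafeProjection import explains this node's job: copy binary (unsafe) rows into JVM-object (safe) rows so that downstream operators can safely hold references to them. A hedged sketch, assuming childRows is the child's row sequence:

    // hypothetical: convert each unsafe row to a safe row
    val toSafe = FromUnsafeProjection(child.output.map(_.dataType))
    childRows.map(toSafe)

ConvertToUnsafeNode in the next hunk is the mirror image, packing rows back into the binary format via UnsafeProjection.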
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToUnsafeNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToUnsafeNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToUnsafeNode.scala
index de2f4e6..f3fa474 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToUnsafeNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ConvertToUnsafeNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Projection, UnsafeProjection}
+import org.apache.spark.sql.internal.SQLConf
case class ConvertToUnsafeNode(conf: SQLConf, child: LocalNode) extends UnaryLocalNode(conf) {
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ExpandNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ExpandNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ExpandNode.scala
index 85111bd..6ccd6db 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ExpandNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ExpandNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
+import org.apache.spark.sql.internal.SQLConf
case class ExpandNode(
conf: SQLConf,
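ExpandNode produces several output rows per input row, one per projection group (this is what backs ROLLUP and CUBE). Conceptually, with hypothetical names:

    // hypothetical: apply every projection to every input row
    inputRows.flatMap(row => projections.map(project => project(row).copy()))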
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/FilterNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/FilterNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/FilterNode.scala
index dd1113b..c5eb33c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/FilterNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/FilterNode.scala
@@ -17,10 +17,10 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate
+import org.apache.spark.sql.internal.SQLConf
case class FilterNode(conf: SQLConf, condition: Expression, child: LocalNode)
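The GeneratePredicate import shows that FilterNode code-generates its condition rather than interpreting it row by row. A hedged sketch of that idea, assuming childRows is the child's row sequence:

    // hypothetical: compile the condition once, then filter with the compiled predicate
    val predicate = GeneratePredicate.generate(condition, child.output)
    childRows.filter(predicate)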
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/IntersectNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/IntersectNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/IntersectNode.scala
index 740d485..e594e13 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/IntersectNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/IntersectNode.scala
@@ -19,9 +19,9 @@ package org.apache.spark.sql.execution.local
import scala.collection.mutable
-import org.apache.spark.sql.SQLConf
-import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.internal.SQLConf
case class IntersectNode(conf: SQLConf, left: LocalNode, right: LocalNode)
extends BinaryLocalNode(conf) {
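The scala.collection.mutable import hints at the strategy: materialize one side into a hash set and filter the other. A hedged sketch with hypothetical names (rows are copied because operators may reuse row buffers; a full implementation would also de-duplicate the surviving side):

    // hypothetical set-based intersect
    val seen = mutable.HashSet[InternalRow]()
    rightRows.foreach(row => seen += row.copy())
    leftRows.filter(seen.contains)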
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LimitNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LimitNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LimitNode.scala
index 401b10a..9af45ac 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LimitNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LimitNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
+import org.apache.spark.sql.internal.SQLConf
case class LimitNode(conf: SQLConf, limit: Int, child: LocalNode) extends UnaryLocalNode(conf) {
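LimitNode's full signature is visible above, so construction is direct; semantically the node is just take(limit) over the local iterator. Grounded in the signature, with childNode assumed to exist:

    // take the first ten rows produced by childNode
    val limited = LimitNode(conf, limit = 10, child = childNode)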
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LocalNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LocalNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LocalNode.scala
index 8726e48..a5d0969 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LocalNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/LocalNode.scala
@@ -18,11 +18,12 @@
package org.apache.spark.sql.execution.local
import org.apache.spark.Logging
-import org.apache.spark.sql.{Row, SQLConf}
+import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.plans.QueryPlan
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
/**
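LocalNode is the base class of every operator touched in this diff. A hedged driver-loop sketch, assuming the open()/next()/fetch()/close() protocol these single-machine nodes expose:

    // hypothetical consumption of a LocalNode
    node.open()
    try {
      while (node.next()) {
        val row = node.fetch()  // valid only until the next call to next()
        // ... consume row ...
      }
    } finally {
      node.close()
    }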
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/NestedLoopJoinNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/NestedLoopJoinNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/NestedLoopJoinNode.scala
index b93bde5..b5ea083 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/NestedLoopJoinNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/NestedLoopJoinNode.scala
@@ -17,11 +17,11 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.{FullOuter, JoinType, LeftOuter, RightOuter}
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight, BuildSide}
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.collection.{BitSet, CompactBuffer}
case class NestedLoopJoinNode(
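Nested loop join is the fallback when no equi-join keys are available: every row from one side is tested against every row from the other. Conceptually, with hypothetical names (the real node also handles outer joins, which is what the imported BitSet supports):

    // hypothetical O(n*m) core of a nested loop join
    for {
      l <- leftRows
      r <- rightRows
      if condition(l, r)
    } yield merge(l, r)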
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ProjectNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ProjectNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ProjectNode.scala
index bd73b08..5fe068a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ProjectNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/ProjectNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, NamedExpression, UnsafeProjection}
+import org.apache.spark.sql.internal.SQLConf
case class ProjectNode(conf: SQLConf, projectList: Seq[NamedExpression], child: LocalNode)
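The UnsafeProjection import shows that ProjectNode compiles its expression list into a single row transformer. A hedged sketch, assuming childRows is the child's row sequence:

    // hypothetical: compile the project list once, apply it per row
    val project = UnsafeProjection.create(projectList, child.output)
    childRows.map(project)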
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SampleNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SampleNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SampleNode.scala
index 7937008..078fb50 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SampleNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SampleNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.random.{BernoulliCellSampler, PoissonSampler}
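The two imported samplers mirror DataFrame.sample semantics: Poisson for sampling with replacement, Bernoulli cell sampling without. A hedged sketch in which withReplacement, fraction, lowerBound and upperBound stand in for the node's parameters:

    // hypothetical sampler selection
    val sampler =
      if (withReplacement) new PoissonSampler[InternalRow](fraction)
      else new BernoulliCellSampler[InternalRow](lowerBound, upperBound)
    sampler.sample(childRows.iterator)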
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SeqScanNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SeqScanNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SeqScanNode.scala
index b8467f6..8ebfe3a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SeqScanNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/SeqScanNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
+import org.apache.spark.sql.internal.SQLConf
/**
* An operator that scans some local data collection in the form of Scala Seq.
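A hedged usage sketch for this leaf operator, assuming the 1.6-era shape SeqScanNode(conf, output, data); attributes and rows are hypothetical locals:

    // expose an in-memory Seq[InternalRow] as a scan
    val scan = SeqScanNode(conf, output = attributes, data = rows)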
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/TakeOrderedAndProjectNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/TakeOrderedAndProjectNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/TakeOrderedAndProjectNode.scala
index ca68b76..f52f5f7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/TakeOrderedAndProjectNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/TakeOrderedAndProjectNode.scala
@@ -17,10 +17,10 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateOrdering
+import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.BoundedPriorityQueue
case class TakeOrderedAndProjectNode(
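The BoundedPriorityQueue import gives away the algorithm: keep only the best limit rows while streaming, then sort that small set. A hedged sketch in which limit and ordering are hypothetical locals:

    // hypothetical top-k via a bounded priority queue
    val queue = new BoundedPriorityQueue[InternalRow](limit)(ordering.reverse)
    childRows.foreach(row => queue += row.copy())
    queue.toArray.sorted(ordering)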
http://git-wip-us.apache.org/repos/asf/spark/blob/2b2c8c33/sql/core/src/main/scala/org/apache/spark/sql/execution/local/UnionNode.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/UnionNode.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/UnionNode.scala
index 0f2b830..e53bc22 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/local/UnionNode.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/local/UnionNode.scala
@@ -17,9 +17,9 @@
package org.apache.spark.sql.execution.local
-import org.apache.spark.sql.SQLConf
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
+import org.apache.spark.sql.internal.SQLConf
case class UnionNode(conf: SQLConf, children: Seq[LocalNode]) extends LocalNode(conf) {
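UnionNode's full signature closes out the diff; the node simply concatenates its children's rows (UNION ALL semantics, no de-duplication). Grounded in the line above, with the child nodes assumed to exist:

    // concatenate the output of two local nodes
    val union = UnionNode(conf, Seq(leftNode, rightNode))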