Posted to commits@spark.apache.org by we...@apache.org on 2017/06/02 16:58:05 UTC
spark git commit: [SPARK-20967][SQL] SharedState.externalCatalog is not really lazy
Repository: spark
Updated Branches:
refs/heads/master 625cebfde -> d1b80ab92
[SPARK-20967][SQL] SharedState.externalCatalog is not really lazy
## What changes were proposed in this pull request?
`SharedState.externalCatalog` is marked as a `lazy val`, but it is not actually lazy: we access `externalCatalog` while initializing `SharedState`, which defeats the purpose of the `lazy val`. Since creating the `ExternalCatalog` involves connecting to the metastore and may throw an error, it makes sense for it to be genuinely lazy in `SharedState`.
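To illustrate, here is a minimal, self-contained sketch (the class and names below are illustrative, not Spark's): touching a `lazy val` from the constructor body forces its initializer while the object is still being built, so the `lazy` keyword buys nothing.

```scala
// Sketch of the bug pattern; `SharedStateLike` is a stand-in, not Spark code.
class SharedStateLike {
  lazy val externalCatalog: String = {
    println("connecting to metastore...") // may throw if the metastore is down
    "catalog"
  }

  // Constructor code that touches the lazy val defeats its laziness:
  // the initializer runs right here, during construction.
  require(externalCatalog.nonEmpty)
}

object Demo extends App {
  new SharedStateLike // prints "connecting to metastore..." immediately
}
```

With the fix, a metastore connection error surfaces on first use of the catalog rather than while constructing `SharedState`.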
## How was this patch tested?
Existing tests.
Author: Wenchen Fan <we...@databricks.com>
Closes #18187 from cloud-fan/minor.
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d1b80ab9
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d1b80ab9
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d1b80ab9
Branch: refs/heads/master
Commit: d1b80ab9220d83e5fdaf33c513cc811dd17d0de1
Parents: 625cebf
Author: Wenchen Fan <we...@databricks.com>
Authored: Fri Jun 2 09:58:01 2017 -0700
Committer: Wenchen Fan <we...@databricks.com>
Committed: Fri Jun 2 09:58:01 2017 -0700
----------------------------------------------------------------------
.../apache/spark/sql/internal/SharedState.scala | 26 ++++++++++----------
1 file changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/d1b80ab9/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
index a93b701..7202f12 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/internal/SharedState.scala
@@ -90,38 +90,38 @@ private[sql] class SharedState(val sparkContext: SparkContext) extends Logging {
   /**
    * A catalog that interacts with external systems.
    */
-  lazy val externalCatalog: ExternalCatalog =
-    SharedState.reflect[ExternalCatalog, SparkConf, Configuration](
+  lazy val externalCatalog: ExternalCatalog = {
+    val externalCatalog = SharedState.reflect[ExternalCatalog, SparkConf, Configuration](
       SharedState.externalCatalogClassName(sparkContext.conf),
       sparkContext.conf,
       sparkContext.hadoopConfiguration)
 
-  // Create the default database if it doesn't exist.
-  {
     val defaultDbDefinition = CatalogDatabase(
       SessionCatalog.DEFAULT_DATABASE,
       "default database",
       CatalogUtils.stringToURI(warehousePath),
       Map())
-    // Initialize default database if it doesn't exist
+    // Create default database if it doesn't exist
     if (!externalCatalog.databaseExists(SessionCatalog.DEFAULT_DATABASE)) {
       // There may be another Spark application creating default database at the same time, here we
       // set `ignoreIfExists = true` to avoid `DatabaseAlreadyExists` exception.
       externalCatalog.createDatabase(defaultDbDefinition, ignoreIfExists = true)
     }
-  }
 
-  // Make sure we propagate external catalog events to the spark listener bus
-  externalCatalog.addListener(new ExternalCatalogEventListener {
-    override def onEvent(event: ExternalCatalogEvent): Unit = {
-      sparkContext.listenerBus.post(event)
-    }
-  })
+    // Make sure we propagate external catalog events to the spark listener bus
+    externalCatalog.addListener(new ExternalCatalogEventListener {
+      override def onEvent(event: ExternalCatalogEvent): Unit = {
+        sparkContext.listenerBus.post(event)
+      }
+    })
+
+    externalCatalog
+  }
 
   /**
    * A manager for global temporary views.
    */
-  val globalTempViewManager: GlobalTempViewManager = {
+  lazy val globalTempViewManager: GlobalTempViewManager = {
     // System preserved database should not exists in metastore. However it's hard to guarantee it
     // for every session, because case-sensitivity differs. Here we always lowercase it to make our
     // life easier.
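The fixed pattern, sketched below under the same illustrative names (not Spark code): reflection, default-database creation, and listener wiring all move inside the `lazy val` block, so none of it runs until the catalog is first accessed, and the block's last expression becomes the value. `globalTempViewManager` becomes lazy for the same reason: its initializer (continued past the end of the hunk shown here) also consults the external catalog, so a strict `val` would still force the catalog during construction.

```scala
// Sketch of the fixed pattern (illustrative names, not Spark code): all side
// effects live inside the lazy val's block and run only on first access.
object LazyPattern extends App {
  class Catalog {
    def ensureDefaultDatabase(): Unit = println("default database ready")
    def addListener(f: String => Unit): Unit = f("listener registered")
  }

  class SharedStateLike {
    lazy val externalCatalog: Catalog = {
      println("connecting to metastore...") // may throw; now deferred
      val catalog = new Catalog
      catalog.ensureDefaultDatabase()          // one-time setup, on first use
      catalog.addListener(msg => println(msg)) // event wiring, also deferred
      catalog                                  // block's result becomes the value
    }
  }

  val state = new SharedStateLike // prints nothing: catalog not forced yet
  state.externalCatalog           // first access runs the whole block, once
}
```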