Posted to commits@spark.apache.org by rx...@apache.org on 2016/04/29 23:54:43 UTC

spark git commit: [SPARK-15013][SQL] Remove hiveConf from HiveSessionState

Repository: spark
Updated Branches:
  refs/heads/master a04b1de5f -> af32f4aed


[SPARK-15013][SQL] Remove hiveConf from HiveSessionState

## What changes were proposed in this pull request?
The hiveconf in HiveSessionState is no longer used anywhere, so let's remove it.
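
For context, a minimal sketch (not part of this patch) of where a session-level setting lives once hiveconf is gone. The Spark 2.0-style builder API and the config key below are illustrative assumptions, not taken from this commit:

```scala
import org.apache.spark.sql.SparkSession

// Hypothetical sketch: with hiveconf removed, a session setting is held in
// the session's SQLConf (and, per HiveSessionState.setConf, mirrored to the
// metastore client as a SET command); nothing is written to a HiveConf.
object SessionConfExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .enableHiveSupport()
      .getOrCreate()

    // Records the setting in the session's SQLConf.
    spark.conf.set("hive.exec.dynamic.partition", "true")

    // Reads it back through SQL; SET with just a key returns its value.
    spark.sql("SET hive.exec.dynamic.partition").show()

    spark.stop()
  }
}
```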

## How was this patch tested?
Existing tests

Author: Yin Huai <yh...@databricks.com>

Closes #12786 from yhuai/removeHiveConf.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/af32f4ae
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/af32f4ae
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/af32f4ae

Branch: refs/heads/master
Commit: af32f4aed650ba7acb381b98f3487e889e96f8c9
Parents: a04b1de
Author: Yin Huai <yh...@databricks.com>
Authored: Fri Apr 29 14:54:40 2016 -0700
Committer: Reynold Xin <rx...@databricks.com>
Committed: Fri Apr 29 14:54:40 2016 -0700

----------------------------------------------------------------------
 .../spark/sql/hive/HiveSessionState.scala       | 26 --------------------
 .../apache/spark/sql/hive/test/TestHive.scala   |  5 +---
 2 files changed, 1 insertion(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/af32f4ae/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
index 9608f0b..b17a88b 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveSessionState.scala
@@ -45,31 +45,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
    */
   lazy val metadataHive: HiveClient = sharedState.metadataHive.newSession()
 
-  /**
-   * SQLConf and HiveConf contracts:
-   *
-   * 1. create a new o.a.h.hive.ql.session.SessionState for each HiveContext
-   * 2. when the Hive session is first initialized, params in HiveConf will get picked up by the
-   *    SQLConf.  Additionally, any properties set by set() or a SET command inside sql() will be
-   *    set in the SQLConf *as well as* in the HiveConf.
-   */
-  lazy val hiveconf: HiveConf = {
-    val initialConf = new HiveConf(
-      sparkSession.sparkContext.hadoopConfiguration,
-      classOf[org.apache.hadoop.hive.ql.session.SessionState])
-
-    // HiveConf is a Hadoop Configuration, which has a field of classLoader and
-    // the initial value will be the current thread's context class loader
-    // (i.e. initClassLoader at here).
-    // We call initialConf.setClassLoader(initClassLoader) at here to make
-    // this action explicit.
-    initialConf.setClassLoader(sparkSession.sharedState.jarClassLoader)
-    sparkSession.sparkContext.conf.getAll.foreach { case (k, v) =>
-      initialConf.set(k, v)
-    }
-    initialConf
-  }
-
   setDefaultOverrideConfs()
 
   /**
@@ -145,7 +120,6 @@ private[hive] class HiveSessionState(sparkSession: SparkSession)
   override def setConf(key: String, value: String): Unit = {
     super.setConf(key, value)
     metadataHive.runSqlHive(s"SET $key=$value")
-    hiveconf.set(key, value)
   }
 
   override def addJar(path: String): Unit = {

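A restatement of the surviving override above, with descriptive comments added; this sits inside HiveSessionState exactly as the diff shows, and only the comments are mine:

```scala
// Post-patch shape of HiveSessionState.setConf, per the diff above:
// the hiveconf.set(key, value) line is gone, so a setting is recorded
// in SQLConf and mirrored to the Hive metastore session, nothing else.
override def setConf(key: String, value: String): Unit = {
  super.setConf(key, value)                   // record in the session's SQLConf
  metadataHive.runSqlHive(s"SET $key=$value") // mirror to the metastore client
}
```
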
http://git-wip-us.apache.org/repos/asf/spark/blob/af32f4ae/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
----------------------------------------------------------------------
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
index c4a3a74..e763b63 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala
@@ -153,9 +153,6 @@ private[hive] class TestHiveSparkSession(
   // By clearing the port we force Spark to pick a new one.  This allows us to rerun tests
   // without restarting the JVM.
   System.clearProperty("spark.hostPort")
-  CommandProcessorFactory.clean(sessionState.hiveconf)
-
-  sessionState.hiveconf.set("hive.plan.serialization.format", "javaXML")
 
   // For some hive test case which contain ${system:test.tmp.dir}
   System.setProperty("test.tmp.dir", Utils.createTempDir().getCanonicalPath)
@@ -423,7 +420,7 @@ private[hive] class TestHiveSparkSession(
         foreach { udfName => FunctionRegistry.unregisterTemporaryUDF(udfName) }
 
       // Some tests corrupt this value on purpose, which breaks the RESET call below.
-      sessionState.hiveconf.set("fs.default.name", new File(".").toURI.toString)
+      sessionState.conf.setConfString("fs.default.name", new File(".").toURI.toString)
       // It is important that we RESET first as broken hooks that might have been set could break
       // other sql exec here.
       sessionState.metadataHive.runSqlHive("RESET")

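For clarity, a short sketch of the test-reset pattern after this change, as it would appear inside TestHiveSparkSession. setConfString comes from the diff above; getConfString is assumed to be its read-side counterpart, and the surrounding method is elided:

```scala
import java.io.File

// Sketch, per the diff above: the reset path now writes the string-keyed
// setting straight into SQLConf instead of a per-session HiveConf.
sessionState.conf.setConfString("fs.default.name", new File(".").toURI.toString)

// Assumed counterpart (not in this diff): read the value back from SQLConf.
val fsDefault = sessionState.conf.getConfString("fs.default.name")

// RESET still runs against the metastore client, as before.
sessionState.metadataHive.runSqlHive("RESET")
```
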

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org