Posted to commits@hive.apache.org by br...@apache.org on 2014/10/30 17:22:48 UTC

svn commit: r1635536 [6/28] - in /hive/branches/spark: ./ accumulo-handler/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/ accumulo-handler/src/test/org/apache/hadoo...

Modified: hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/Utils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/Utils.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/Utils.java (original)
+++ hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/Utils.java Thu Oct 30 16:22:33 2014
@@ -22,6 +22,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
@@ -65,7 +66,9 @@ public class Utils {
 
     // Client param names:
     static final String AUTH_TYPE = "auth";
-    static final String AUTH_QOP = "sasl.qop";
+    // We're deprecating this variable's name.
+    static final String AUTH_QOP_DEPRECATED = "sasl.qop";
+    static final String AUTH_QOP = "saslQop";
     static final String AUTH_SIMPLE = "noSasl";
     static final String AUTH_TOKEN = "delegationToken";
     static final String AUTH_USER = "user";
@@ -78,14 +81,23 @@ public class Utils {
     static final String USE_SSL = "ssl";
     static final String SSL_TRUST_STORE = "sslTrustStore";
     static final String SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
-    static final String TRANSPORT_MODE = "hive.server2.transport.mode";
-    static final String HTTP_PATH = "hive.server2.thrift.http.path";
+    // We're deprecating the name and placement of this in the parsed map (from hive conf vars to
+    // hive session vars).
+    static final String TRANSPORT_MODE_DEPRECATED = "hive.server2.transport.mode";
+    static final String TRANSPORT_MODE = "transportMode";
+    // We're deprecating the name and placement of this in the parsed map (from hive conf vars to
+    // hive session vars).
+    static final String HTTP_PATH_DEPRECATED = "hive.server2.thrift.http.path";
+    static final String HTTP_PATH = "httpPath";
     static final String SERVICE_DISCOVERY_MODE = "serviceDiscoveryMode";
-    // Don't use dynamic serice discovery
+    // Don't use dynamic service discovery
     static final String SERVICE_DISCOVERY_MODE_NONE = "none";
     // Use ZooKeeper for indirection while using dynamic service discovery
     static final String SERVICE_DISCOVERY_MODE_ZOOKEEPER = "zooKeeper";
     static final String ZOOKEEPER_NAMESPACE = "zooKeeperNamespace";
+    // Default namespace value on ZooKeeper.
+    // This value is used if the param "zooKeeperNamespace" is not specified in the JDBC Uri.
+    static final String ZOOKEEPER_DEFAULT_NAMESPACE = "hiveserver2";
 
     // Non-configurable params:
     // ZOOKEEPER_SESSION_TIMEOUT is not exposed as client configurable
@@ -214,10 +226,11 @@ public class Utils {
 
   // Verify success and optionally with_info status, else throw SQLException
   public static void verifySuccess(TStatus status, boolean withInfo) throws SQLException {
-    if ((status.getStatusCode() != TStatusCode.SUCCESS_STATUS) &&
-        (withInfo && (status.getStatusCode() != TStatusCode.SUCCESS_WITH_INFO_STATUS))) {
-        throw new HiveSQLException(status);
+    if (status.getStatusCode() == TStatusCode.SUCCESS_STATUS ||
+        (withInfo && status.getStatusCode() == TStatusCode.SUCCESS_WITH_INFO_STATUS)) {
+      return;
     }
+    throw new HiveSQLException(status);
   }
 
   /**
@@ -329,6 +342,25 @@ public class Utils {
       }
     }
 
+    // Handle all deprecations here:
+    String newUsage;
+    String usageUrlBase = "jdbc:hive2://<host>:<port>/dbName;";
+    // Handle deprecation of AUTH_QOP_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.AUTH_QOP + "=<qop_value>";
+    handleParamDeprecation(connParams.getSessionVars(), connParams.getSessionVars(),
+        JdbcConnectionParams.AUTH_QOP_DEPRECATED, JdbcConnectionParams.AUTH_QOP, newUsage);
+
+    // Handle deprecation of TRANSPORT_MODE_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.TRANSPORT_MODE + "=<transport_mode_value>";
+    handleParamDeprecation(connParams.getHiveConfs(), connParams.getSessionVars(),
+        JdbcConnectionParams.TRANSPORT_MODE_DEPRECATED, JdbcConnectionParams.TRANSPORT_MODE,
+        newUsage);
+
+    // Handle deprecation of HTTP_PATH_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.HTTP_PATH + "=<http_path_value>";
+    handleParamDeprecation(connParams.getHiveConfs(), connParams.getSessionVars(),
+        JdbcConnectionParams.HTTP_PATH_DEPRECATED, JdbcConnectionParams.HTTP_PATH, newUsage);
+
     // Extract host, port
     if (connParams.isEmbeddedMode()) {
       // In case of embedded mode we were supplied with an empty authority.
@@ -339,6 +371,7 @@ public class Utils {
       // Else substitute the dummy authority with a resolved one.
       // In case of dynamic service discovery using ZooKeeper, it picks a server uri from ZooKeeper
       String resolvedAuthorityString = resolveAuthority(connParams);
+      LOG.info("Resolved authority: " + resolvedAuthorityString);
       uri = uri.replace(dummyAuthorityString, resolvedAuthorityString);
       connParams.setJdbcUriString(uri);
       // Create a Java URI from the resolved URI for extracting the host/port
@@ -356,6 +389,26 @@ public class Utils {
   }
 
   /**
+   * Remove the deprecatedName param from fromMap and add its value to toMap under newName.
+   * Also log a deprecation warning for the client.
+   * @param fromMap
+   * @param toMap
+   * @param deprecatedName
+   * @param newName
+   * @param newUsage
+   */
+  private static void handleParamDeprecation(Map<String, String> fromMap, Map<String, String> toMap,
+      String deprecatedName, String newName, String newUsage) {
+    if (fromMap.containsKey(deprecatedName)) {
+      LOG.warn("***** JDBC param deprecation *****");
+      LOG.warn("The use of " + deprecatedName + " is deprecated.");
+      LOG.warn("Please use " + newName +" like so: " + newUsage);
+      String paramValue = fromMap.remove(deprecatedName);
+      toMap.put(newName, paramValue);
+    }
+  }
+
+  /**
    * Get the authority string from the supplied uri, which could potentially contain multiple
    * host:port pairs.
    *
@@ -367,14 +420,30 @@ public class Utils {
   private static String getAuthorities(String uri, JdbcConnectionParams connParams)
       throws JdbcUriParseException {
     String authorities;
-    // For a jdbc uri like: jdbc:hive2://host1:port1,host2:port2,host3:port3/
-    // Extract the uri host:port list starting after "jdbc:hive2://", till the 1st "/" or EOL
+    /**
+     * For a jdbc uri like:
+     * jdbc:hive2://<host1>:<port1>,<host2>:<port2>/dbName;sess_var_list?conf_list#var_list
+     * Extract the uri host:port list starting after "jdbc:hive2://",
+     * up to the first occurrence of "/", "?" or "#", checked in that order
+     * Examples:
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3/db;k1=v1?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3/;k1=v1?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3#k3=v3
+     */
     int fromIndex = Utils.URL_PREFIX.length();
-    int toIndex = uri.indexOf("/", fromIndex);
+    int toIndex = -1;
+    ArrayList<String> toIndexChars = new ArrayList<String>(Arrays.asList("/", "?", "#"));
+    for (String toIndexChar : toIndexChars) {
+      toIndex = uri.indexOf(toIndexChar, fromIndex);
+      if (toIndex > 0) {
+        break;
+      }
+    }
     if (toIndex < 0) {
       authorities = uri.substring(fromIndex);
     } else {
-      authorities = uri.substring(fromIndex, uri.indexOf("/", fromIndex));
+      authorities = uri.substring(fromIndex, toIndex);
     }
     return authorities;
   }
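
For reference, a minimal sketch of a client using the renamed parameters above; the host, port, and credentials are placeholders, and the values shown assume an HTTP-mode HiveServer2:

import java.sql.Connection;
import java.sql.DriverManager;

public class NewJdbcParamNames {
  public static void main(String[] args) throws Exception {
    // transportMode and httpPath replace the deprecated hive.server2.transport.mode
    // and hive.server2.thrift.http.path keys; for Kerberos over the binary
    // transport, saslQop (e.g. ";saslQop=auth-conf") replaces sasl.qop.
    String url = "jdbc:hive2://host1:10001/default;"
        + "transportMode=http;httpPath=cliservice";
    try (Connection conn = DriverManager.getConnection(url, "user", "passwd")) {
      System.out.println("connected: " + !conn.isClosed());
    }
  }
}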

Modified: hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java (original)
+++ hive/branches/spark/jdbc/src/java/org/apache/hive/jdbc/ZooKeeperHiveClientHelper.java Thu Oct 30 16:22:33 2014
@@ -53,12 +53,16 @@ public class ZooKeeperHiveClientHelper {
     String zooKeeperEnsemble = connParams.getZooKeeperEnsemble();
     String zooKeeperNamespace =
         connParams.getSessionVars().get(JdbcConnectionParams.ZOOKEEPER_NAMESPACE);
+    if ((zooKeeperNamespace == null) || (zooKeeperNamespace.isEmpty())) {
+      zooKeeperNamespace = JdbcConnectionParams.ZOOKEEPER_DEFAULT_NAMESPACE;
+    }
     List<String> serverHosts;
     Random randomizer = new Random();
     String serverNode;
+    ZooKeeper zooKeeperClient = null;
     // Pick a random HiveServer2 host from the ZooKeeper namespace
     try {
-      ZooKeeper zooKeeperClient =
+      zooKeeperClient =
           new ZooKeeper(zooKeeperEnsemble, JdbcConnectionParams.ZOOKEEPER_SESSION_TIMEOUT,
               new ZooKeeperHiveClientHelper.DummyWatcher());
       // All the HiveServer2 host nodes that are in ZooKeeper currently
@@ -80,7 +84,15 @@ public class ZooKeeperHiveClientHelper {
       return serverUri;
     } catch (Exception e) {
       throw new ZooKeeperHiveClientException("Unable to read HiveServer2 uri from ZooKeeper", e);
+    } finally {
+      // Try to close the client connection with ZooKeeper
+      if (zooKeeperClient != null) {
+        try {
+          zooKeeperClient.close();
+        } catch (Exception e) {
+          // No-op
+        }
+      }
     }
   }
-
 }
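
A hypothetical connection string exercising the discovery path above (the ZooKeeper hosts are placeholders); with zooKeeperNamespace omitted, the client now falls back to the "hiveserver2" default:

import java.sql.Connection;
import java.sql.DriverManager;

public class ZkDiscoveryExample {
  public static void main(String[] args) throws Exception {
    // No zooKeeperNamespace param, so ZOOKEEPER_DEFAULT_NAMESPACE ("hiveserver2")
    // is used when listing HiveServer2 znodes.
    String url = "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/default;"
        + "serviceDiscoveryMode=zooKeeper";
    try (Connection conn = DriverManager.getConnection(url, "user", "")) {
      System.out.println("connected to a randomly picked HiveServer2 instance");
    }
  }
}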

Modified: hive/branches/spark/metastore/pom.xml
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/pom.xml?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/pom.xml (original)
+++ hive/branches/spark/metastore/pom.xml Thu Oct 30 16:22:33 2014
@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hive</groupId>
     <artifactId>hive</artifactId>
-    <version>0.14.0-SNAPSHOT</version>
+    <version>0.15.0-SNAPSHOT</version>
     <relativePath>../pom.xml</relativePath>
   </parent>
 

Modified: hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql Thu Oct 30 16:22:33 2014
@@ -318,8 +318,75 @@ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "
 
 -- ----------------------------
 -- Transaction and Lock Tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add the following section back.
 -- ----------------------------
-RUN 'hive-txn-schema-0.13.0.derby.sql';
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint REFERENCES TXNS (TXN_ID),
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767)
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) NOT NULL,
+  HL_LOCK_TYPE char(1) NOT NULL,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
 
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
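
As an aside, a sequence table such as NEXT_TXN_ID is consumed by reading and advancing it inside a single transaction. The JDBC sketch below only illustrates that idea; it is not Hive's actual TxnHandler code, and error handling/rollback are elided:

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public final class NextTxnIdSketch {
  // Reserve 'count' transaction ids and return the first id of the range.
  static long reserveTxnIds(Connection conn, int count) throws Exception {
    conn.setAutoCommit(false); // read + advance must be one transaction
    try (Statement stmt = conn.createStatement()) {
      ResultSet rs = stmt.executeQuery("SELECT NTXN_NEXT FROM NEXT_TXN_ID");
      rs.next(); // the table is seeded with a single row (VALUES(1))
      long first = rs.getLong(1);
      stmt.executeUpdate("UPDATE NEXT_TXN_ID SET NTXN_NEXT = " + (first + count));
      conn.commit();
      return first;
    }
  }
}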

Modified: hive/branches/spark/metastore/scripts/upgrade/derby/upgrade.order.derby
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/derby/upgrade.order.derby?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/derby/upgrade.order.derby (original)
+++ hive/branches/spark/metastore/scripts/upgrade/derby/upgrade.order.derby Thu Oct 30 16:22:33 2014
@@ -7,3 +7,4 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
 0.13.0-to-0.14.0
+0.14.0-to-0.15.0

Copied: hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql (from r1634945, hive/trunk/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql)
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql?p2=hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql&p1=hive/trunk/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql&r1=1634945&r2=1635536&rev=1635536&view=diff
==============================================================================
--- hive/trunk/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/hive-txn-schema-0.14.0.mssql.sql Thu Oct 30 16:22:33 2014
@@ -1 +1,101 @@
--- Licensed to the Apache Software Foundation (ASF) under one or more
-- contributor license agreements.  See the NOTICE file distributed with
-- this work for additional information regarding copyright ownership.
-- The ASF licenses this file to You under the Apache License, Version 2.0
-- (the "License"); you may not use this file except in compliance with
-- the License.  You may obtain a copy of the License at
--
--     http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.

--
-- Tables for transaction management
-- 

CREATE TABLE COMPACTION_QUEUE(
	CQ_ID int NOT NULL,
	CQ_DATABASE varchar(128) NOT NULL,
	CQ_TABLE varchar(128) NOT NULL,
	CQ_PARTITION varchar(767) NULL,
	CQ_STATE char(1) NOT NULL,
	CQ_TYPE char(1) NOT NULL,
	CQ_WORKER_ID varchar(128) NULL,
	CQ_START int NULL,
	CQ_RUN_AS varchar(128) NULL,
PRIMARY KEY CLUSTERED 
(
	CQ_ID ASC
)
);

CREATE TABLE COMPLETED_TXN_COMPONENTS(
	CTC_TXNID int NULL,
	CTC_DATABASE varchar(128) NOT NULL,
	CTC_TABLE varchar(128) NULL,
	CTC_PARTITION varchar(767) NULL
);

CREATE TABLE HIVE_LOCKS(
	HL_LOCK_EXT_ID int NOT NULL,
	HL_LOCK_INT_ID int NOT NULL,
	HL_TXNID int NULL,
	HL_DB varchar(128) NOT NULL,
	HL_TABLE varchar(128) NULL,
	HL_PARTITION varchar(767) NULL,
	HL_LOCK_STATE char(1) NOT NULL,
	HL_LOCK_TYPE char(1) NOT NULL,
	HL_LAST_HEARTBEAT int NOT NULL,
	HL_ACQUIRED_AT int NULL,
	HL_USER varchar(128) NOT NULL,
	HL_HOST varchar(128) NOT NULL,
PRIMARY KEY CLUSTERED 
(
	HL_LOCK_EXT_ID ASC,
	HL_LOCK_INT_ID ASC
)
);

CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
	NCQ_NEXT int NOT NULL
);

INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);

CREATE TABLE NEXT_LOCK_ID(
	NL_NEXT int NOT NULL
);

INSERT INTO NEXT_LOCK_ID VALUES(1);

CREATE TABLE NEXT_TXN_ID(
	NTXN_NEXT int NOT NULL
);

INSERT INTO NEXT_TXN_ID VALUES(1);

CREATE TABLE TXNS(
	TXN_ID int NOT NULL,
	TXN_STATE char(1) NOT NULL,
	TXN_STARTED int NOT NULL,
	TXN_LAST_HEARTBEAT int NOT NULL,
	TXN_USER varchar(128) NOT NULL,
	TXN_HOST varchar(128) NOT NULL,
PRIMARY KEY CLUSTERED 
(
	TXN_ID ASC
)
);

CREATE TABLE TXN_COMPONENTS(
	TC_TXNID int NULL,
	TC_DATABASE varchar(128) NOT NULL,
	TC_TABLE varchar(128) NULL,
	TC_PARTITION varchar(767) NULL
);

ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
\ No newline at end of file
+-- Licensed to the Apache Software Foundation (ASF) under one or more
+-- contributor license agreements.  See the NOTICE file distributed with
+-- this work for additional information regarding copyright ownership.
+-- The ASF licenses this file to You under the Apache License, Version 2.0
+-- (the "License"); you may not use this file except in compliance with
+-- the License.  You may obtain a copy of the License at
+--
+--     http://www.apache.org/licenses/LICENSE-2.0
+--
+-- Unless required by applicable law or agreed to in writing, software
+-- distributed under the License is distributed on an "AS IS" BASIS,
+-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-- See the License for the specific language governing permissions and
+-- limitations under the License.
+
+--
+-- Tables for transaction management
+-- 
+
+CREATE TABLE COMPACTION_QUEUE(
+	CQ_ID int NOT NULL,
+	CQ_DATABASE varchar(128) NOT NULL,
+	CQ_TABLE varchar(128) NOT NULL,
+	CQ_PARTITION varchar(767) NULL,
+	CQ_STATE char(1) NOT NULL,
+	CQ_TYPE char(1) NOT NULL,
+	CQ_WORKER_ID varchar(128) NULL,
+	CQ_START int NULL,
+	CQ_RUN_AS varchar(128) NULL,
+PRIMARY KEY CLUSTERED 
+(
+	CQ_ID ASC
+)
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS(
+	CTC_TXNID int NULL,
+	CTC_DATABASE varchar(128) NOT NULL,
+	CTC_TABLE varchar(128) NULL,
+	CTC_PARTITION varchar(767) NULL
+);
+
+CREATE TABLE HIVE_LOCKS(
+	HL_LOCK_EXT_ID int NOT NULL,
+	HL_LOCK_INT_ID int NOT NULL,
+	HL_TXNID int NULL,
+	HL_DB varchar(128) NOT NULL,
+	HL_TABLE varchar(128) NULL,
+	HL_PARTITION varchar(767) NULL,
+	HL_LOCK_STATE char(1) NOT NULL,
+	HL_LOCK_TYPE char(1) NOT NULL,
+	HL_LAST_HEARTBEAT int NOT NULL,
+	HL_ACQUIRED_AT int NULL,
+	HL_USER varchar(128) NOT NULL,
+	HL_HOST varchar(128) NOT NULL,
+PRIMARY KEY CLUSTERED 
+(
+	HL_LOCK_EXT_ID ASC,
+	HL_LOCK_INT_ID ASC
+)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+	NCQ_NEXT int NOT NULL
+);
+
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+
+CREATE TABLE NEXT_LOCK_ID(
+	NL_NEXT int NOT NULL
+);
+
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE NEXT_TXN_ID(
+	NTXN_NEXT int NOT NULL
+);
+
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE TXNS(
+	TXN_ID int NOT NULL,
+	TXN_STATE char(1) NOT NULL,
+	TXN_STARTED int NOT NULL,
+	TXN_LAST_HEARTBEAT int NOT NULL,
+	TXN_USER varchar(128) NOT NULL,
+	TXN_HOST varchar(128) NOT NULL,
+PRIMARY KEY CLUSTERED 
+(
+	TXN_ID ASC
+)
+);
+
+CREATE TABLE TXN_COMPONENTS(
+	TC_TXNID int NULL,
+	TC_DATABASE varchar(128) NOT NULL,
+	TC_TABLE varchar(128) NULL,
+	TC_PARTITION varchar(767) NULL
+);
+
+ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);

Modified: hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mssql/upgrade.order.mssql Thu Oct 30 16:22:33 2014
@@ -1,3 +1,4 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
 0.13.0-to-0.14.0
+0.14.0-to-0.15.0

Modified: hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql Thu Oct 30 16:22:33 2014
@@ -798,8 +798,77 @@ CREATE TABLE IF NOT EXISTS `FUNC_RU` (
 
 -- ----------------------------
 -- Transaction and Lock Tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add the following section back.
 -- ----------------------------
-SOURCE hive-txn-schema-0.13.0.mysql.sql;
+CREATE TABLE TXNS (
+  TXN_ID bigint PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED bigint NOT NULL,
+  TXN_LAST_HEARTBEAT bigint NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID bigint,
+  TC_DATABASE varchar(128) NOT NULL,
+  TC_TABLE varchar(128),
+  TC_PARTITION varchar(767),
+  FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID bigint,
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID bigint NOT NULL,
+  HL_LOCK_INT_ID bigint NOT NULL,
+  HL_TXNID bigint,
+  HL_DB varchar(128) NOT NULL,
+  HL_TABLE varchar(128),
+  HL_PARTITION varchar(767),
+  HL_LOCK_STATE char(1) not null,
+  HL_LOCK_TYPE char(1) not null,
+  HL_LAST_HEARTBEAT bigint NOT NULL,
+  HL_ACQUIRED_AT bigint,
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+  KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID bigint PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START bigint,
+  CQ_RUN_AS varchar(128)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT bigint NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
 
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script

Modified: hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade.order.mysql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade.order.mysql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade.order.mysql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/mysql/upgrade.order.mysql Thu Oct 30 16:22:33 2014
@@ -7,3 +7,4 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
 0.13.0-to-0.14.0
+0.14.0-to-0.15.0

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql Thu Oct 30 16:22:33 2014
@@ -757,8 +757,75 @@ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUN
 
 ------------------------------
 -- Transaction and lock tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add the following section back.
 ------------------------------
-@hive-txn-schema-0.13.0.oracle.sql;
+CREATE TABLE TXNS (
+  TXN_ID NUMBER(19) PRIMARY KEY,
+  TXN_STATE char(1) NOT NULL,
+  TXN_STARTED NUMBER(19) NOT NULL,
+  TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  TXN_USER varchar(128) NOT NULL,
+  TXN_HOST varchar(128) NOT NULL
+);
+
+CREATE TABLE TXN_COMPONENTS (
+  TC_TXNID NUMBER(19) REFERENCES TXNS (TXN_ID),
+  TC_DATABASE VARCHAR2(128) NOT NULL,
+  TC_TABLE VARCHAR2(128),
+  TC_PARTITION VARCHAR2(767) NULL
+);
+
+CREATE TABLE COMPLETED_TXN_COMPONENTS (
+  CTC_TXNID NUMBER(19),
+  CTC_DATABASE varchar(128) NOT NULL,
+  CTC_TABLE varchar(128),
+  CTC_PARTITION varchar(767)
+);
+
+CREATE TABLE NEXT_TXN_ID (
+  NTXN_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_TXN_ID VALUES(1);
+
+CREATE TABLE HIVE_LOCKS (
+  HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+  HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+  HL_TXNID NUMBER(19),
+  HL_DB VARCHAR2(128) NOT NULL,
+  HL_TABLE VARCHAR2(128),
+  HL_PARTITION VARCHAR2(767),
+  HL_LOCK_STATE CHAR(1) NOT NULL,
+  HL_LOCK_TYPE CHAR(1) NOT NULL,
+  HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+  HL_ACQUIRED_AT NUMBER(19),
+  HL_USER varchar(128) NOT NULL,
+  HL_HOST varchar(128) NOT NULL,
+  PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+); 
+
+CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+
+CREATE TABLE NEXT_LOCK_ID (
+  NL_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_LOCK_ID VALUES(1);
+
+CREATE TABLE COMPACTION_QUEUE (
+  CQ_ID NUMBER(19) PRIMARY KEY,
+  CQ_DATABASE varchar(128) NOT NULL,
+  CQ_TABLE varchar(128) NOT NULL,
+  CQ_PARTITION varchar(767),
+  CQ_STATE char(1) NOT NULL,
+  CQ_TYPE char(1) NOT NULL,
+  CQ_WORKER_ID varchar(128),
+  CQ_START NUMBER(19),
+  CQ_RUN_AS varchar(128)
+);
+
+CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+  NCQ_NEXT NUMBER(19) NOT NULL
+);
+INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
 
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script

Modified: hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade.order.oracle
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade.order.oracle?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade.order.oracle (original)
+++ hive/branches/spark/metastore/scripts/upgrade/oracle/upgrade.order.oracle Thu Oct 30 16:22:33 2014
@@ -3,3 +3,4 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
 0.13.0-to-0.14.0
+0.14.0-to-0.15.0

Modified: hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql (original)
+++ hive/branches/spark/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql Thu Oct 30 16:22:33 2014
@@ -1465,8 +1465,75 @@ GRANT ALL ON SCHEMA public TO PUBLIC;
 
 ------------------------------
 -- Transaction and lock tables
+-- These tables are not part of the JDO package, so if you regenerate this file you must manually add the following section back.
 ------------------------------
-\i hive-txn-schema-0.13.0.postgres.sql;
+CREATE TABLE "txns" (
+  "txn_id" bigint PRIMARY KEY,
+  "txn_state" char(1) NOT NULL,
+  "txn_started" bigint NOT NULL,
+  "txn_last_heartbeat" bigint NOT NULL,
+  "txn_user" varchar(128) NOT NULL,
+  "txn_host" varchar(128) NOT NULL
+);
+
+CREATE TABLE "txn_components" (
+  "tc_txnid" bigint REFERENCES "txns" ("txn_id"),
+  "tc_database" varchar(128) NOT NULL,
+  "tc_table" varchar(128),
+  "tc_partition" varchar(767) DEFAULT NULL
+);
+
+CREATE TABLE "completed_txn_components" (
+  "ctc_txnid" bigint,
+  "ctc_database" varchar(128) NOT NULL,
+  "ctc_table" varchar(128),
+  "ctc_partition" varchar(767)
+);
+
+CREATE TABLE "next_txn_id" (
+  "ntxn_next" bigint NOT NULL
+);
+INSERT INTO "next_txn_id" VALUES(1);
+
+CREATE TABLE "hive_locks" (
+  "hl_lock_ext_id" bigint NOT NULL,
+  "hl_lock_int_id" bigint NOT NULL,
+  "hl_txnid" bigint,
+  "hl_db" varchar(128) NOT NULL,
+  "hl_table" varchar(128),
+  "hl_partition" varchar(767) DEFAULT NULL,
+  "hl_lock_state" char(1) NOT NULL,
+  "hl_lock_type" char(1) NOT NULL,
+  "hl_last_heartbeat" bigint NOT NULL,
+  "hl_acquired_at" bigint,
+  "hl_user" varchar(128) NOT NULL,
+  "hl_host" varchar(128) NOT NULL,
+  PRIMARY KEY("hl_lock_ext_id", "hl_lock_int_id")
+); 
+
+CREATE INDEX "hl_txnid_index" ON "hive_locks" USING hash ("hl_txnid");
+
+CREATE TABLE "next_lock_id" (
+  "nl_next" bigint NOT NULL
+);
+INSERT INTO "next_lock_id" VALUES(1);
+
+CREATE TABLE "compaction_queue" (
+  "cq_id" bigint PRIMARY KEY,
+  "cq_database" varchar(128) NOT NULL,
+  "cq_table" varchar(128) NOT NULL,
+  "cq_partition" varchar(767),
+  "cq_state" char(1) NOT NULL,
+  "cq_type" char(1) NOT NULL,
+  "cq_worker_id" varchar(128),
+  "cq_start" bigint,
+  "cq_run_as" varchar(128)
+);
+
+CREATE TABLE "next_compaction_queue_id" (
+  "ncq_next" bigint NOT NULL
+);
+INSERT INTO "next_compaction_queue_id" VALUES(1);
 
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script

Modified: hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade.order.postgres
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade.order.postgres?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade.order.postgres (original)
+++ hive/branches/spark/metastore/scripts/upgrade/postgres/upgrade.order.postgres Thu Oct 30 16:22:33 2014
@@ -7,3 +7,4 @@
 0.11.0-to-0.12.0
 0.12.0-to-0.13.0
 0.13.0-to-0.14.0
+0.14.0-to-0.15.0

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java Thu Oct 30 16:22:33 2014
@@ -354,7 +354,7 @@ public class HiveAlterHandler implements
           srcFs = wh.getFs(srcPath);
           destFs = wh.getFs(destPath);
           // check that src and dest are on the same file system
-          if (srcFs != destFs) {
+          if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
             throw new InvalidOperationException("table new location " + destPath
               + " is on a different file system than the old location "
               + srcPath + ". This operation is not supported");
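
The old check compared FileSystem object references, which can report two handles to the same cluster as different; FileUtils.equalsFileSystem compares the filesystems themselves. A rough sketch of the underlying idea, not Hive's actual implementation:

import org.apache.hadoop.fs.FileSystem;

public final class FsCompareSketch {
  // Two handles refer to the same filesystem if their URIs (scheme plus
  // authority) match, regardless of object identity.
  static boolean sameFileSystem(FileSystem a, FileSystem b) {
    return a.getUri().equals(b.getUri());
  }
}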

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Thu Oct 30 16:22:33 2014
@@ -48,6 +48,8 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
+import javax.jdo.JDOException;
+
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -146,24 +148,30 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.UnlockRequest;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
@@ -184,6 +192,7 @@ import org.apache.hadoop.hive.serde2.Des
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
 import org.apache.hadoop.hive.thrift.TUGIContainingTransport;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -570,22 +579,50 @@ public class HiveMetaStore extends Thrif
     }
 
     /**
-     * create default database if it doesn't exist
+     * create default database if it doesn't exist.
+     *
+     * This is a potential point of contention when a HiveServer2 using an embedded metastore
+     * and a Metastore Server concurrently invoke createDefaultDB. If the first attempt fails
+     * with a JDOException, the call is retried once; if it fails again, the failure is merely
+     * logged as a warning, on the assumption that the other caller succeeded.
      *
      * @throws MetaException
      */
     private void createDefaultDB() throws MetaException {
       try {
         createDefaultDB_core(getMS());
+      } catch (JDOException e) {
+        LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+        try {
+          createDefaultDB_core(getMS());
+        } catch (InvalidObjectException e1) {
+          throw new MetaException(e1.getMessage());
+        }
       } catch (InvalidObjectException e) {
         throw new MetaException(e.getMessage());
-      } catch (MetaException e) {
-        throw e;
       }
     }
 
-
+    /**
+     * create default roles if they don't exist.
+     *
+     * This is a potential point of contention when a HiveServer2 using an embedded metastore
+     * and a Metastore Server concurrently invoke createDefaultRoles. If the first attempt fails
+     * with a JDOException, the call is retried once; if it fails again, the failure is merely
+     * logged as a warning, on the assumption that the other caller succeeded.
+     *
+     * @throws MetaException
+     */
     private void createDefaultRoles() throws MetaException {
+      try {
+        createDefaultRoles_core();
+      } catch (JDOException e) {
+        LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+        createDefaultRoles_core();
+      }
+    }
+
+    private void createDefaultRoles_core() throws MetaException {
 
       RawStore ms = getMS();
       try {
@@ -622,7 +659,25 @@ public class HiveMetaStore extends Thrif
       }
     }
 
+    /**
+     * add admin users if they don't exist.
+     *
+     * This is a potential point of contention when a HiveServer2 using an embedded metastore
+     * and a Metastore Server concurrently invoke addAdminUsers. If the first attempt fails with
+     * a JDOException, the call is retried once; if it fails again, the failure is merely logged
+     * as a warning, on the assumption that the other caller succeeded.
+     *
+     * @throws MetaException
+     */
     private void addAdminUsers() throws MetaException {
+      try {
+        addAdminUsers_core();
+      } catch (JDOException e) {
+        LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+        addAdminUsers_core();
+      }
+    }
+
+    private void addAdminUsers_core() throws MetaException {
 
       // now add pre-configured users to admin role
       String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
@@ -1381,17 +1436,15 @@ public class HiveMetaStore extends Thrif
       return (ms.getTable(dbname, name) != null);
     }
 
-    private void drop_table_core(final RawStore ms, final String dbname, final String name,
-        final boolean deleteData, final EnvironmentContext envContext)
-        throws NoSuchObjectException, MetaException, IOException,
-        InvalidObjectException, InvalidInputException {
+    private boolean drop_table_core(final RawStore ms, final String dbname, final String name,
+        final boolean deleteData, final EnvironmentContext envContext,
+        final String indexName) throws NoSuchObjectException,
+        MetaException, IOException, InvalidObjectException, InvalidInputException {
       boolean success = false;
       boolean isExternal = false;
       Path tblPath = null;
       List<Path> partPaths = null;
       Table tbl = null;
-      isExternal = false;
-      boolean isIndexTable = false;
       try {
         ms.openTransaction();
         // drop any partitions
@@ -1405,8 +1458,8 @@ public class HiveMetaStore extends Thrif
 
         firePreEvent(new PreDropTableEvent(tbl, deleteData, this));
 
-        isIndexTable = isIndexTable(tbl);
-        if (isIndexTable) {
+        boolean isIndexTable = isIndexTable(tbl);
+        if (indexName == null && isIndexTable) {
           throw new RuntimeException(
               "The table " + name + " is an index table. Please do drop index instead.");
         }
@@ -1428,7 +1481,8 @@ public class HiveMetaStore extends Thrif
         if (tbl.getSd().getLocation() != null) {
           tblPath = new Path(tbl.getSd().getLocation());
           if (!wh.isWritable(tblPath.getParent())) {
-            throw new MetaException("Table metadata not deleted since " +
+            String target = indexName == null ? "Table" : "Index table";
+            throw new MetaException(target + " metadata not deleted since " +
                 tblPath.getParent() + " is not writable by " +
                 hiveConf.getUser());
           }
@@ -1439,17 +1493,17 @@ public class HiveMetaStore extends Thrif
             tbl.getPartitionKeys(), deleteData && !isExternal);
 
         if (!ms.dropTable(dbname, name)) {
-          throw new MetaException("Unable to drop table");
+          String tableName = dbname + "." + name;
+          throw new MetaException(indexName == null ? "Unable to drop table " + tableName:
+              "Unable to drop index table " + tableName + " for index " + indexName);
         }
         success = ms.commitTransaction();
       } finally {
         if (!success) {
           ms.rollbackTransaction();
         } else if (deleteData && !isExternal) {
-          boolean ifPurge = false;
-          if (envContext != null){
-            ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
-          }
+          boolean ifPurge = envContext != null &&
+              Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
           // Delete the data in the partitions which have other locations
           deletePartitionData(partPaths, ifPurge);
           // Delete the data in the table
@@ -1462,6 +1516,7 @@ public class HiveMetaStore extends Thrif
           listener.onDropTable(dropTableEvent);
         }
       }
+      return success;
     }
 
     /**
@@ -1605,8 +1660,7 @@ public class HiveMetaStore extends Thrif
       boolean success = false;
       Exception ex = null;
       try {
-        drop_table_core(getMS(), dbname, name, deleteData, envContext);
-        success = true;
+        success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null);
       } catch (IOException e) {
         ex = e;
         throw new MetaException(e.getMessage());
@@ -3161,7 +3215,12 @@ public class HiveMetaStore extends Thrif
 
       boolean success = false;
       Exception ex = null;
+      Index oldIndex = null;
       try {
+        oldIndex = get_index_by_name(dbname, base_table_name, index_name);
+
+        firePreEvent(new PreAlterIndexEvent(oldIndex, newIndex, this));
+
         getMS().alterIndex(dbname, base_table_name, index_name, newIndex);
         success = true;
       } catch (InvalidObjectException e) {
@@ -3178,6 +3237,10 @@ public class HiveMetaStore extends Thrif
         }
       } finally {
         endFunction("alter_index", success, ex, base_table_name);
+        for (MetaStoreEventListener listener : listeners) {
+          AlterIndexEvent alterIndexEvent = new AlterIndexEvent(oldIndex, newIndex, success, this);
+          listener.onAlterIndex(alterIndexEvent);
+        }
       }
       return;
     }
@@ -3307,7 +3370,7 @@ public class HiveMetaStore extends Thrif
           ret = tbl.getSd().getCols();
         } else {
           try {
-            Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl);
+            Deserializer s = MetaStoreUtils.getDeserializer(hiveConf, tbl, false);
             ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s);
           } catch (SerDeException e) {
             StringUtils.stringifyException(e);
@@ -3723,6 +3786,8 @@ public class HiveMetaStore extends Thrif
 
       try {
         ms.openTransaction();
+        firePreEvent(new PreAddIndexEvent(index, this));
+
         Index old_index = null;
         try {
           old_index = get_index_by_name(index.getDbName(), index
@@ -3770,6 +3835,10 @@ public class HiveMetaStore extends Thrif
           }
           ms.rollbackTransaction();
         }
+        for (MetaStoreEventListener listener : listeners) {
+          AddIndexEvent addIndexEvent = new AddIndexEvent(index, success, this);
+          listener.onAddIndex(addIndexEvent);
+        }
       }
     }
 
@@ -3804,16 +3873,17 @@ public class HiveMetaStore extends Thrif
         MetaException, TException, IOException, InvalidObjectException, InvalidInputException {
 
       boolean success = false;
+      Index index = null;
       Path tblPath = null;
       List<Path> partPaths = null;
       try {
         ms.openTransaction();
 
         // drop the underlying index table
-        Index index = get_index_by_name(dbName, tblName, indexName);
-        if (index == null) {
-          throw new NoSuchObjectException(indexName + " doesn't exist");
-        }
+        index = get_index_by_name(dbName, tblName, indexName);  // throws exception if not exists
+
+        firePreEvent(new PreDropIndexEvent(index, this));
+
         ms.dropIndex(dbName, tblName, indexName);
 
         String idxTblName = index.getIndexTableName();
@@ -3834,26 +3904,29 @@ public class HiveMetaStore extends Thrif
           }
 
           // Drop the partitions and get a list of partition locations which need to be deleted
-          partPaths = dropPartitionsAndGetLocations(ms, dbName, idxTblName, tblPath,
+          partPaths = dropPartitionsAndGetLocations(ms, qualified[0], qualified[1], tblPath,
               tbl.getPartitionKeys(), deleteData);
 
-          if (!ms.dropTable(dbName, idxTblName)) {
+          if (!ms.dropTable(qualified[0], qualified[1])) {
             throw new MetaException("Unable to drop underlying data table "
-                + idxTblName + " for index " + idxTblName);
+                + qualified[0] + "." + qualified[1] + " for index " + indexName);
           }
         }
         success = ms.commitTransaction();
       } finally {
         if (!success) {
           ms.rollbackTransaction();
-          return false;
         } else if (deleteData && tblPath != null) {
           deletePartitionData(partPaths);
           deleteTableData(tblPath);
           // ok even if the data is not deleted
         }
+        for (MetaStoreEventListener listener : listeners) {
+          DropIndexEvent dropIndexEvent = new DropIndexEvent(index, success, this);
+          listener.onDropIndex(dropIndexEvent);
+        }
       }
-      return true;
+      return success;
     }
 
     @Override
@@ -3872,7 +3945,7 @@ public class HiveMetaStore extends Thrif
         ex = e;
         rethrowException(e);
       } finally {
-        endFunction("drop_index_by_name", ret != null, ex, tblName);
+        endFunction("get_index_by_name", ret != null, ex, tblName);
       }
       return ret;
     }
@@ -5489,13 +5562,20 @@ public class HiveMetaStore extends Thrif
   }
 
 
-  public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
-    return newHMSHandler(name, hiveConf, false);
+  public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf)
+      throws MetaException {
+    return newRetryingHMSHandler(baseHandler, hiveConf, false);
+  }
+
+  public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf,
+      boolean local) throws MetaException {
+    return RetryingHMSHandler.getProxy(hiveConf, baseHandler, local);
   }
 
-  public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf, boolean local)
+  public static Iface newRetryingHMSHandler(String name, HiveConf conf, boolean local)
       throws MetaException {
-    return RetryingHMSHandler.getProxy(hiveConf, name, local);
+    HMSHandler baseHandler = new HiveMetaStore.HMSHandler(name, conf, false);
+    return RetryingHMSHandler.getProxy(conf, baseHandler, local);
   }
 
   /**
@@ -5691,7 +5771,7 @@ public class HiveMetaStore extends Thrif
     try {
       isMetaStoreRemote = true;
       // Server will create new threads up to max as necessary. After an idle
-      // period, it will destory threads to keep the number of threads in the
+      // period, it will destroy threads to keep the number of threads in the
       // pool to min.
       int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS);
       int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS);
@@ -5704,6 +5784,9 @@ public class HiveMetaStore extends Thrif
 
       TProcessor processor;
       TTransportFactory transFactory;
+      HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf,
+          false);
+      IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
       if (useSasl) {
         // we are in secure mode.
         if (useFramedTransport) {
@@ -5713,17 +5796,14 @@ public class HiveMetaStore extends Thrif
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE),
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
         // start delegation token manager
-        HMSHandler hmsHandler = new HMSHandler("new db based metaserver", conf);
-        saslServer.startDelegationTokenSecretManager(conf, hmsHandler);
+        saslServer.startDelegationTokenSecretManager(conf, baseHandler.getMS(), ServerMode.METASTORE);
         transFactory = saslServer.createTransportFactory(
                 MetaStoreUtils.getMetaStoreSaslProperties(conf));
         processor = saslServer.wrapProcessor(
-          new ThriftHiveMetastore.Processor<HMSHandler>(hmsHandler));
+          new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
         LOG.info("Starting DB backed MetaStore Server in Secure Mode");
       } else {
         // we are in unsecure mode.
-        IHMSHandler handler = newHMSHandler("new db based metaserver", conf);
-
         if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
           transFactory = useFramedTransport ?
               new ChainedTTransportFactory(new TFramedTransport.Factory(),
@@ -5878,7 +5958,7 @@ public class HiveMetaStore extends Thrif
     LOG.info("Starting metastore thread of type " + thread.getClass().getName());
     thread.setHiveConf(conf);
     thread.setThreadId(nextThreadId++);
-    thread.init(new MetaStoreThread.BooleanPointer());
+    thread.init(new MetaStoreThread.BooleanPointer(), new MetaStoreThread.BooleanPointer());
     thread.start();
   }
 }
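
The createDefaultDB/createDefaultRoles/addAdminUsers changes above share one idiom: treat the first JDOException as a lost race against a concurrent creator and retry once. A generic sketch of that pattern (the Action interface is illustrative, not a Hive API):

import javax.jdo.JDOException;

public final class RetryOnceSketch {
  interface Action {
    void run() throws Exception;
  }

  static void runWithOneRetry(Action action) throws Exception {
    try {
      action.run();
    } catch (JDOException e) {
      // Assume a concurrent caller raced us; one retry settles it.
      System.err.println("Retrying after JDOException: " + e.getMessage());
      action.run(); // a second failure propagates to the caller
    }
  }
}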

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Thu Oct 30 16:22:33 2014
@@ -175,7 +175,7 @@ public class HiveMetaStoreClient impleme
     if (localMetaStore) {
       // instantiate the metastore server handler directly instead of connecting
       // through the network
-      client = HiveMetaStore.newHMSHandler("hive client", conf, true);
+      client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true);
       isConnected = true;
       snapshotActiveConf();
       return;

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java Thu Oct 30 16:22:33 2014
@@ -45,10 +45,13 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
@@ -143,13 +146,18 @@ class MetaStoreDirectSql {
   }
 
   /**
-   * See {@link #trySetAnsiQuotesForMysql()}.
+   * This function is intended to be called before a query is put together. Any query-specific
+   * initialization that must run inside the transaction is done here - e.g., for MySQL we
+   * signal that we want ANSI SQL quoting behaviour.
    */
-  private void setAnsiQuotesForMysql() throws MetaException {
-    try {
-      trySetAnsiQuotesForMysql();
-    } catch (SQLException sqlEx) {
-      throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage());
+  private void doDbSpecificInitializationsBeforeQuery() throws MetaException {
+    if (isMySql){
+      try {
+        assert pm.currentTransaction().isActive(); // must be inside tx together with queries
+        trySetAnsiQuotesForMysql();
+      } catch (SQLException sqlEx) {
+        throw new MetaException("Error setting ansi quotes: " + sqlEx.getMessage());
+      }
     }
   }
 
@@ -171,6 +179,78 @@ class MetaStoreDirectSql {
     }
   }
 
+  public Database getDatabase(String dbName) throws MetaException{
+    Query queryDbSelector = null;
+    Query queryDbParams = null;
+    try {
+      dbName = dbName.toLowerCase();
+
+      doDbSpecificInitializationsBeforeQuery();
+
+      String queryTextDbSelector= "select "
+          + "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", "
+          + "\"OWNER_NAME\", \"OWNER_TYPE\" "
+          + "FROM \"DBS\" where \"NAME\" = ? ";
+      Object[] params = new Object[] { dbName };
+      queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector);
+
+      LOG.debug("getDatabase:query instantiated : " + queryTextDbSelector + " with param ["+params[0]+"]");
+
+      List<Object[]> sqlResult = (List<Object[]>)queryDbSelector.executeWithArray(params);
+      if ((sqlResult == null) || sqlResult.isEmpty()) {
+        LOG.debug("getDatabase:queryDbSelector ran, returned no/empty results, returning NoSuchObjectException");
+        throw new MetaException("There is no database named " + dbName);
+      }
+
+      assert(sqlResult.size() == 1);
+      if (sqlResult.get(0) == null){
+        LOG.debug("getDatabase:queryDbSelector ran, returned results, but the result entry was null, returning NoSuchObjectException");
+        throw new MetaException("There is no database named " + dbName);
+      }
+
+      Object[] dbline = sqlResult.get(0);
+      Long dbid = StatObjectConverter.extractSqlLong(dbline[0]);
+
+      String queryTextDbParams = "select \"PARAM_KEY\", \"PARAM_VALUE\" "
+          + " FROM \"DATABASE_PARAMS\" "
+          + " WHERE \"DB_ID\" = ? "
+          + " AND \"PARAM_KEY\" IS NOT NULL";
+      Object[] params2 = new Object[] { dbid };
+      queryDbParams = pm.newQuery("javax.jdo.query.SQL",queryTextDbParams);
+      LOG.debug("getDatabase:query2 instantiated : " + queryTextDbParams + " with param ["+params2[0]+"]");
+
+      Map<String,String> dbParams = new HashMap<String,String>();
+      List<Object[]> sqlResult2 = ensureList(queryDbParams.executeWithArray(params2));
+      if (!sqlResult2.isEmpty()){
+        for (Object[] line : sqlResult2){
+          dbParams.put(extractSqlString(line[0]),extractSqlString(line[1]));
+        }
+      }
+      LOG.debug("getDatabase: instantiating db object to return");
+      Database db = new Database();
+      db.setName(extractSqlString(dbline[1]));
+      db.setLocationUri(extractSqlString(dbline[2]));
+      db.setDescription(extractSqlString(dbline[3]));
+      db.setOwnerName(extractSqlString(dbline[4]));
+      String type = extractSqlString(dbline[5]);
+      db.setOwnerType((null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
+      db.setParameters(dbParams);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("getDatabase: directsql returning db " + db.getName()
+            + " locn [" + db.getLocationUri() + "] desc [" + db.getDescription()
+            + "] owner [" + db.getOwnerName() + "] ownertype [" + db.getOwnerType() + "]");
+      }
+      return db;
+    } finally {
+      if (queryDbSelector != null) {
+        queryDbSelector.closeAll();
+      }
+      if (queryDbParams != null) {
+        queryDbParams.closeAll();
+      }
+    }
+  }
+
   /**
    * Gets partitions by using direct SQL queries.
    * @param dbName Metastore db name.
@@ -260,10 +340,8 @@ class MetaStoreDirectSql {
     tblName = tblName.toLowerCase();
     // We have to be mindful of order during filtering if we are not returning all partitions.
     String orderForFilter = (max != null) ? " order by \"PART_NAME\" asc" : "";
-    if (isMySql) {
-      assert pm.currentTransaction().isActive();
-      setAnsiQuotesForMysql(); // must be inside tx together with queries
-    }
+
+    doDbSpecificInitializationsBeforeQuery();
 
     // Get all simple fields for partitions and related objects, which we can map one-on-one.
     // We will do this in 2 queries to use different existing indices for each one.
@@ -621,6 +699,11 @@ class MetaStoreDirectSql {
     return ((Number)field).intValue();
   }
 
+  private String extractSqlString(Object value) {
+    if (value == null) return null;
+    return value.toString();
+  }
+
   private static String trimCommaList(StringBuilder sb) {
     if (sb.length() > 0) {
       sb.setLength(sb.length() - 1);

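The new getDatabase() above uses JDO's SQL pass-through language ("javax.jdo.query.SQL") rather than JDOQL, which is how the direct-SQL path skips the ORM layer. A minimal sketch of that pattern in isolation -- the table, columns, and parameter value here are illustrative, not taken from the patch:

    // Sketch only: assumes an open PersistenceManager `pm` inside an active
    // transaction, and that java.util.List and javax.jdo.Query are imported.
    Query query = null;
    try {
      query = pm.newQuery("javax.jdo.query.SQL",
          "select \"DB_ID\", \"NAME\" from \"DBS\" where \"NAME\" = ?");
      List<Object[]> rows = (List<Object[]>) query.executeWithArray(new Object[] { "default" });
      for (Object[] row : rows) {
        // Columns come back positionally, which is why getDatabase() indexes into dbline[].
        System.out.println(row[0] + " -> " + row[1]);
      }
    } finally {
      if (query != null) {
        query.closeAll(); // always release JDO query resources, as the finally block above does
      }
    }
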
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreEventListener.java Thu Oct 30 16:22:33 2014
@@ -21,6 +21,8 @@ package org.apache.hadoop.hive.metastore
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
@@ -28,6 +30,7 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
 import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
@@ -117,7 +120,27 @@ public abstract class MetaStoreEventList
    * @throws MetaException
    */
   public void onLoadPartitionDone(LoadPartitionDoneEvent partSetDoneEvent) throws MetaException {
+  }
+
+  /**
+   * @param indexEvent the add index event
+   * @throws MetaException
+   */
+  public void onAddIndex(AddIndexEvent indexEvent) throws MetaException {
+  }
+
+  /**
+   * @param indexEvent the drop index event
+   * @throws MetaException
+   */
+  public void onDropIndex(DropIndexEvent indexEvent) throws MetaException {
+  }
 
+  /**
+   * @param indexEvent the alter index event
+   * @throws MetaException
+   */
+  public void onAlterIndex(AlterIndexEvent indexEvent) throws MetaException {
   }
 
   @Override

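Because the three new index callbacks default to no-ops, existing listeners stay source-compatible and a new listener only overrides what it needs. A hypothetical listener (the class name is made up; the constructor follows the usual Configuration-taking shape of MetaStoreEventListener subclasses):

    // Hypothetical listener: only the index callbacks are overridden.
    public class IndexAuditListener extends MetaStoreEventListener {
      public IndexAuditListener(Configuration conf) {
        super(conf);
      }

      @Override
      public void onAddIndex(AddIndexEvent indexEvent) throws MetaException {
        // e.g., record the new index for auditing
      }

      @Override
      public void onDropIndex(DropIndexEvent indexEvent) throws MetaException {
        // e.g., invalidate anything cached against the dropped index
      }
    }
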
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java Thu Oct 30 16:22:33 2014
@@ -41,25 +41,30 @@ public class MetaStoreInit {
   }
 
   /**
-   * Updates the connection URL in hiveConf using the hook
-   *
+   * Updates the connection URL in hiveConf using the hook (if a hook has been
+   * set via the hive.metastore.ds.connection.url.hook property).
+   * @param originalConf original configuration, used to look up the hook settings
+   * @param activeConf the active configuration, used to look up the current db URL
+   * @param badUrl a connection URL that has failed and is reported to the hook; may be null
+   * @param updateData holds the instantiated connection URL hook
    * @return true if a new connection URL was loaded into the thread local
    *         configuration
+   * @throws MetaException
    */
-  static boolean updateConnectionURL(HiveConf hiveConf, Configuration conf,
+  static boolean updateConnectionURL(HiveConf originalConf, Configuration activeConf,
     String badUrl, MetaStoreInitData updateData)
       throws MetaException {
     String connectUrl = null;
-    String currentUrl = MetaStoreInit.getConnectionURL(conf);
+    String currentUrl = MetaStoreInit.getConnectionURL(activeConf);
     try {
       // We always call init because the hook name in the configuration could
       // have changed.
-      MetaStoreInit.initConnectionUrlHook(hiveConf, updateData);
+      MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
       if (updateData.urlHook != null) {
         if (badUrl != null) {
           updateData.urlHook.notifyBadConnectionUrl(badUrl);
         }
-        connectUrl = updateData.urlHook.getJdoConnectionUrl(hiveConf);
+        connectUrl = updateData.urlHook.getJdoConnectionUrl(originalConf);
       }
     } catch (Exception e) {
       LOG.error("Exception while getting connection URL from the hook: " +
@@ -71,7 +76,7 @@ public class MetaStoreInit {
           String.format("Overriding %s with %s",
               HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
               connectUrl));
-      conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
+      activeConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
           connectUrl);
       return true;
     }

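The hook referred to here implements the metastore's connection URL hook interface; the two calls made above (getJdoConnectionUrl and notifyBadConnectionUrl) define its contract. A sketch of a failover hook -- the interface location (org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook) and both URLs are assumptions for illustration:

    // Hypothetical failover hook: flips to a standby metastore db when told a URL is bad.
    public class FailoverUrlHook implements JDOConnectionURLHook {
      private static final String PRIMARY = "jdbc:mysql://db1.example.com/metastore";
      private static final String STANDBY = "jdbc:mysql://db2.example.com/metastore";
      private volatile String current = PRIMARY;

      @Override
      public String getJdoConnectionUrl(Configuration conf) throws Exception {
        return current;
      }

      @Override
      public void notifyBadConnectionUrl(String url) {
        current = PRIMARY.equals(url) ? STANDBY : PRIMARY; // fail over to the other URL
      }
    }
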
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java Thu Oct 30 16:22:33 2014
@@ -37,6 +37,7 @@ public class MetaStoreSchemaInfo {
   private static String UPGRADE_FILE_PREFIX="upgrade-";
   private static String INIT_FILE_PREFIX="hive-schema-";
   private static String VERSION_UPGRADE_LIST = "upgrade.order";
+  private static String PRE_UPGRADE_PREFIX = "pre-";
   private final String dbType;
   private final String hiveSchemaVersions[];
   private final HiveConf hiveConf;
@@ -138,6 +139,10 @@ public class MetaStoreSchemaInfo {
     return UPGRADE_FILE_PREFIX +  fileVersion + "." + dbType + SQL_FILE_EXTENSION;
   }
 
+  public static String getPreUpgradeScriptName(int index, String upgradeScriptName) {
+    return PRE_UPGRADE_PREFIX + index + "-" + upgradeScriptName;
+  }
+
   public static String getHiveSchemaVersion() {
     String hiveVersion = HiveVersionInfo.getShortVersion();
     // if there is an equivalent version, return that, else return this version
@@ -149,4 +154,4 @@ public class MetaStoreSchemaInfo {
     }
   }
 
-}
+}
\ No newline at end of file

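The new helper simply prefixes "pre-" and an index onto an existing upgrade script name, presumably so a schema tool can probe pre-0, pre-1, ... for each upgrade step in order. For example (the script name is hypothetical but follows the getUpgradeScriptName() pattern above):

    String upgrade = "upgrade-0.13.0-to-0.14.0.mysql.sql";   // hypothetical script name
    MetaStoreSchemaInfo.getPreUpgradeScriptName(0, upgrade);
    // -> "pre-0-upgrade-0.13.0-to-0.14.0.mysql.sql"
    MetaStoreSchemaInfo.getPreUpgradeScriptName(1, upgrade);
    // -> "pre-1-upgrade-0.13.0-to-0.14.0.mysql.sql"
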
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java Thu Oct 30 16:22:33 2014
@@ -43,8 +43,13 @@ public interface MetaStoreThread {
    * have been called.
    * @param stop a flag to watch for when to stop.  If this value is set to true,
    *             the thread will terminate the next time through its main loop.
+   * @param looped a flag that is set to true every time the thread goes through its main
+   *               loop.  This is purely for testing, so that tests can assure themselves
+   *               that the thread has run through its loop at least once.  A test can reset
+   *               the flag to false; once the thread sets it back to true, the test knows a
+   *               complete pass through the loop has happened.
    */
-  void init(BooleanPointer stop) throws MetaException;
+  void init(BooleanPointer stop, BooleanPointer looped) throws MetaException;
 
   /**
    * Run the thread in the background.  This must not be called until

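A test built on the new looped flag resets it and then waits for the thread to set it again, which guarantees at least one complete pass through the loop. A sketch, assuming BooleanPointer exposes a public boolValue field as MetaStoreThread defines it, with thread construction elided:

    // `worker` stands for some MetaStoreThread implementation; the enclosing
    // test method is assumed to declare throws InterruptedException.
    MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer();
    MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer();
    worker.init(stop, looped);
    worker.start();

    looped.boolValue = false;       // reset the flag...
    while (!looped.boolValue) {     // ...then wait for the thread to finish one full loop
      Thread.sleep(100);
    }
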
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Thu Oct 30 16:22:33 2014
@@ -357,15 +357,21 @@ public class MetaStoreUtils {
    *
    */
   static public Deserializer getDeserializer(Configuration conf,
-      org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
+      org.apache.hadoop.hive.metastore.api.Table table, boolean skipConfError)
+          throws MetaException {
     String lib = table.getSd().getSerdeInfo().getSerializationLib();
     if (lib == null) {
       return null;
     }
     try {
       Deserializer deserializer = ReflectionUtils.newInstance(conf.getClassByName(lib).
-        asSubclass(Deserializer.class), conf);
-      SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), null);
+              asSubclass(Deserializer.class), conf);
+      if (skipConfError) {
+        SerDeUtils.initializeSerDeWithoutErrorCheck(deserializer, conf,
+                MetaStoreUtils.getTableMetadata(table), null);
+      } else {
+        SerDeUtils.initializeSerDe(deserializer, conf, MetaStoreUtils.getTableMetadata(table), null);
+      }
       return deserializer;
     } catch (RuntimeException e) {
       throw e;
@@ -376,6 +382,12 @@ public class MetaStoreUtils {
     }
   }
 
+  public static Class<? extends Deserializer> getDeserializerClass(
+      Configuration conf, org.apache.hadoop.hive.metastore.api.Table table) throws Exception {
+    String lib = table.getSd().getSerdeInfo().getSerializationLib();
+    return lib == null ? null : conf.getClassByName(lib).asSubclass(Deserializer.class);
+  }
+
   /**
    * getDeserializer
    *

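The extra boolean lets a caller choose between the strict initializeSerDe() path and the new initializeSerDeWithoutErrorCheck() path, so code that only needs the deserializer for metadata can survive a misconfigured SerDe. Usage sketch (conf and table elided):

    Deserializer lenient = MetaStoreUtils.getDeserializer(conf, table, true);   // tolerate SerDe config errors
    Deserializer strict  = MetaStoreUtils.getDeserializer(conf, table, false);  // fail fast, as before
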
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java Thu Oct 30 16:22:33 2014
@@ -64,7 +64,6 @@ import org.apache.hadoop.hive.common.cla
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
-import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
@@ -92,7 +91,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.ResourceUri;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -197,7 +195,7 @@ public class ObjectStore implements RawS
   }
 
   /**
-   * Called whenever this object is instantiated using ReflectionUils, and also
+   * Called whenever this object is instantiated using ReflectionUtils, and also
    * on connection retries. In cases of connection retries, conf will usually
    * contain modified values.
    */
@@ -523,6 +521,34 @@ public class ObjectStore implements RawS
 
   @Override
   public Database getDatabase(String name) throws NoSuchObjectException {
+    try {
+      return getDatabaseInternal(name);
+    } catch (MetaException e) {
+      // The signature restricts us to NSOE, and NSOE being a flat exception prevents us
+      // from setting the MetaException as its cause. We should not lose the information
+      // we have here, but it is very likely that the MetaException is really an NSOE in
+      // disguise, so log it and rethrow its message as an NSOE.
+      LOG.warn("Got a MetaException trying to call getDatabase("
+          + name + "), returning NoSuchObjectException", e);
+      throw new NoSuchObjectException(e.getMessage());
+    }
+  }
+
+  public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException {
+    return new GetDbHelper(name, null, true, true) {
+      @Override
+      protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
+        return directSql.getDatabase(dbName);
+      }
+
+      @Override
+      protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
+        return getJDODatabase(dbName);
+      }
+    }.run(false);
+  }
+
+  public Database getJDODatabase(String name) throws NoSuchObjectException {
     MDatabase mdb = null;
     boolean commited = false;
     try {
@@ -2282,7 +2308,14 @@ public class ObjectStore implements RawS
       assert allowSql || allowJdo;
       this.allowJdo = allowJdo;
       this.dbName = dbName.toLowerCase();
-      this.tblName = tblName.toLowerCase();
+      if (tblName != null) {
+        this.tblName = tblName.toLowerCase();
+      } else {
+        // tblName can be null when the Helper is used at a higher level of
+        // abstraction, such as for databases
+        this.tblName = null;
+        this.table = null;
+      }
       this.doTrace = LOG.isDebugEnabled();
       this.isInTxn = isActiveTransaction();
 
@@ -2331,7 +2364,7 @@ public class ObjectStore implements RawS
     private void start(boolean initTable) throws MetaException, NoSuchObjectException {
       start = doTrace ? System.nanoTime() : 0;
       openTransaction();
-      if (initTable) {
+      if (initTable && (tblName != null)) {
         table = ensureGetTable(dbName, tblName);
       }
     }
@@ -2398,6 +2431,27 @@ public class ObjectStore implements RawS
     }
   }
 
+  private abstract class GetDbHelper extends GetHelper<Database> {
+    /**
+     * GetHelper for returning db info using directSql/JDO.
+     * Since this is a db-level call, tblName is ignored and null is passed on regardless.
+     * @param dbName the database name
+     * @param tblName placeholder param to match the GetHelper signature; always ignored
+     * @param allowSql Whether or not we allow DirectSQL to perform this query.
+     * @param allowJdo Whether or not we allow ORM to perform this query.
+     * @throws MetaException
+     */
+    public GetDbHelper(
+        String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
+        super(dbName, null, allowSql, allowJdo);
+    }
+
+    @Override
+    protected String describeResult() {
+      return "db details for db " + dbName;
+    }
+  }
+
   private abstract class GetStatHelper extends GetHelper<ColumnStatistics> {
     public GetStatHelper(
         String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
@@ -2665,7 +2719,7 @@ public class ObjectStore implements RawS
         throw new MetaException("table " + name + " doesn't exist");
       }
 
-      // For now only alter name, owner, paramters, cols, bucketcols are allowed
+      // For now only alter name, owner, parameters, cols, bucketcols are allowed
       oldt.setDatabase(newt.getDatabase());
       oldt.setTableName(newt.getTableName().toLowerCase());
       oldt.setParameters(newt.getParameters());
@@ -2708,7 +2762,7 @@ public class ObjectStore implements RawS
         throw new MetaException("index " + name + " doesn't exist");
       }
 
-      // For now only alter paramters are allowed
+      // For now only alter parameters are allowed
       oldi.setParameters(newi.getParameters());
 
       // commit the changes
@@ -2878,7 +2932,7 @@ public class ObjectStore implements RawS
     MColumnDescriptor mcd = msd.getCD();
     // Because there is a 1-N relationship between CDs and SDs,
     // we must set the SD's CD to null first before dropping the storage descriptor
-    // to satisfy foriegn key constraints.
+    // to satisfy foreign key constraints.
     msd.setCD(null);
     removeUnusedColumnDescriptor(mcd);
   }
@@ -3019,19 +3073,26 @@ public class ObjectStore implements RawS
   }
 
   private Index convertToIndex(MIndex mIndex) throws MetaException {
-    if(mIndex == null) {
+    if (mIndex == null) {
       return null;
     }
 
+    MTable origTable = mIndex.getOrigTable();
+    MTable indexTable = mIndex.getIndexTable();
+
+    String[] qualified = MetaStoreUtils.getQualifiedName(
+        origTable.getDatabase().getName(), indexTable.getTableName());
+    String indexTableName = qualified[0] + "." + qualified[1];
+
     return new Index(
     mIndex.getIndexName(),
     mIndex.getIndexHandlerClass(),
-    mIndex.getOrigTable().getDatabase().getName(),
-    mIndex.getOrigTable().getTableName(),
+    origTable.getDatabase().getName(),
+    origTable.getTableName(),
     mIndex.getCreateTime(),
     mIndex.getLastAccessTime(),
-    mIndex.getIndexTable().getTableName(),
-    this.convertToStorageDescriptor(mIndex.getSd()),
+    indexTableName,
+    convertToStorageDescriptor(mIndex.getSd()),
     mIndex.getParameters(),
     mIndex.getDeferredRebuild());
 

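GetDbHelper follows ObjectStore's existing GetHelper template: a subclass supplies the direct-SQL result and the JDO fallback, and run() drives the transaction and the choice between them. Any future db-level accessor can copy the shape of getDatabaseInternal() above; the wrapper method name here is hypothetical:

    private Database fetchDatabase(String name) throws MetaException, NoSuchObjectException {
      return new GetDbHelper(name, null, true, true) {
        @Override
        protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
          return directSql.getDatabase(dbName);   // fast path: direct SQL
        }
        @Override
        protected Database getJdoResult(GetHelper<Database> ctx)
            throws MetaException, NoSuchObjectException {
          return getJDODatabase(dbName);          // fallback: JDO/ORM
        }
      }.run(false);                               // false: no table-level init for a db call
    }
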
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Thu Oct 30 16:22:33 2014
@@ -41,34 +41,34 @@ public class RetryingHMSHandler implemen
 
   private static final Log LOG = LogFactory.getLog(RetryingHMSHandler.class);
 
-  private final IHMSHandler base;
+  private final IHMSHandler baseHandler;
   private final MetaStoreInit.MetaStoreInitData metaStoreInitData =
     new MetaStoreInit.MetaStoreInitData();
 
-  private final HiveConf hiveConf;            // base configuration
-  private final Configuration configuration;  // active configuration
+  private final HiveConf origConf;            // base configuration
+  private final Configuration activeConf;  // active configuration
 
-  private RetryingHMSHandler(HiveConf hiveConf, String name, boolean local) throws MetaException {
-    this.hiveConf = hiveConf;
-    this.base = new HiveMetaStore.HMSHandler(name, hiveConf, false);
+  private RetryingHMSHandler(HiveConf hiveConf, IHMSHandler baseHandler, boolean local) throws MetaException {
+    this.origConf = hiveConf;
+    this.baseHandler = baseHandler;
     if (local) {
-      base.setConf(hiveConf); // tests expect configuration changes applied directly to metastore
+      baseHandler.setConf(hiveConf); // tests expect configuration changes applied directly to metastore
     }
-    configuration = base.getConf();
+    activeConf = baseHandler.getConf();
 
     // This has to be called before initializing the instance of HMSHandler
     // Using the hook on startup ensures that the hook always has priority
     // over settings in *.xml.  The thread local conf needs to be used because at this point
     // it has already been initialized using hiveConf.
-    MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData);
+    MetaStoreInit.updateConnectionURL(hiveConf, getActiveConf(), null, metaStoreInitData);
 
-    base.init();
+    baseHandler.init();
   }
 
-  public static IHMSHandler getProxy(HiveConf hiveConf, String name, boolean local)
+  public static IHMSHandler getProxy(HiveConf hiveConf, IHMSHandler baseHandler, boolean local)
       throws MetaException {
 
-    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, name, local);
+    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, baseHandler, local);
 
     return (IHMSHandler) Proxy.newProxyInstance(
       RetryingHMSHandler.class.getClassLoader(),
@@ -79,15 +79,15 @@ public class RetryingHMSHandler implemen
   public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
 
     boolean gotNewConnectUrl = false;
-    boolean reloadConf = HiveConf.getBoolVar(hiveConf,
+    boolean reloadConf = HiveConf.getBoolVar(origConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
-    long retryInterval = HiveConf.getTimeVar(hiveConf,
+    long retryInterval = HiveConf.getTimeVar(origConf,
         HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
-    int retryLimit = HiveConf.getIntVar(hiveConf,
+    int retryLimit = HiveConf.getIntVar(origConf,
         HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 
     if (reloadConf) {
-      MetaStoreInit.updateConnectionURL(hiveConf, getConf(),
+      MetaStoreInit.updateConnectionURL(origConf, getActiveConf(),
         null, metaStoreInitData);
     }
 
@@ -96,9 +96,9 @@ public class RetryingHMSHandler implemen
     while (true) {
       try {
         if (reloadConf || gotNewConnectUrl) {
-          base.setConf(getConf());
+          baseHandler.setConf(getActiveConf());
         }
-        return method.invoke(base, args);
+        return method.invoke(baseHandler, args);
 
       } catch (javax.jdo.JDOException e) {
         caughtException = e;
@@ -158,13 +158,13 @@ public class RetryingHMSHandler implemen
       Thread.sleep(retryInterval);
       // If we have a connection error, the JDO connection URL hook might
       // provide us with a new URL to access the datastore.
-      String lastUrl = MetaStoreInit.getConnectionURL(getConf());
-      gotNewConnectUrl = MetaStoreInit.updateConnectionURL(hiveConf, getConf(),
+      String lastUrl = MetaStoreInit.getConnectionURL(getActiveConf());
+      gotNewConnectUrl = MetaStoreInit.updateConnectionURL(origConf, getActiveConf(),
         lastUrl, metaStoreInitData);
     }
   }
 
-  public Configuration getConf() {
-    return configuration;
+  public Configuration getActiveConf() {
+    return activeConf;
   }
 }

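RetryingHMSHandler wraps the base handler in a java.lang.reflect dynamic proxy, so every IHMSHandler call funnels through invoke() and picks up the retry loop without each method opting in. The general pattern, reduced to its bones (Greeter is a made-up stand-in for IHMSHandler, without the sleep and conf-reload details):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;
    import java.lang.reflect.Proxy;

    interface Greeter { String greet(String who); }

    class RetryingGreeter implements InvocationHandler {
      private final Greeter target;
      private final int retryLimit;

      RetryingGreeter(Greeter target, int retryLimit) {
        this.target = target;
        this.retryLimit = retryLimit;
      }

      static Greeter proxy(Greeter target, int retryLimit) {
        return (Greeter) Proxy.newProxyInstance(
            Greeter.class.getClassLoader(),
            new Class<?>[] { Greeter.class },
            new RetryingGreeter(target, retryLimit));
      }

      @Override
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        for (int attempt = 0; ; attempt++) {
          try {
            return method.invoke(target, args);   // delegate to the real object
          } catch (InvocationTargetException e) {
            if (attempt >= retryLimit) {
              throw e.getCause();                 // out of retries: surface the real failure
            }
            // otherwise loop and retry, as RetryingHMSHandler does after sleeping
          }
        }
      }
    }
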
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java Thu Oct 30 16:22:33 2014
@@ -417,6 +417,7 @@ public class StatObjectConverter {
   public static void fillColumnStatisticsData(String colType, ColumnStatisticsData data,
       Object llow, Object lhigh, Object dlow, Object dhigh, Object declow, Object dechigh,
       Object nulls, Object dist, Object avglen, Object maxlen, Object trues, Object falses) throws MetaException {
+    colType = colType.toLowerCase();
     if (colType.equals("boolean")) {
       BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
       boolStats.setNumFalses(extractSqlLong(falses));

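The one-line toLowerCase() matters because stored column types are evidently not guaranteed to be lower case, and without normalization a type like "BOOLEAN" would fall through every equals() branch below, leaving the stats object empty. Illustration:

    boolean before = "BOOLEAN".equals("boolean");               // false: branch missed
    boolean after  = "BOOLEAN".toLowerCase().equals("boolean"); // true: stats get filled in
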
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java Thu Oct 30 16:22:33 2014
@@ -39,7 +39,10 @@ public abstract class PreEventContext {
     LOAD_PARTITION_DONE,
     AUTHORIZATION_API_CALL,
     READ_TABLE,
-    READ_DATABASE
+    READ_DATABASE,
+    ADD_INDEX,
+    ALTER_INDEX,
+    DROP_INDEX
   }
 
   private final PreEventType eventType;

Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java Thu Oct 30 16:22:33 2014
@@ -547,10 +547,23 @@ public class CompactionTxnHandler extend
     Statement stmt = null;
     ResultSet rs = null;
     try {
+      String quote = getIdentifierQuoteString(dbConn);
       stmt = dbConn.createStatement();
-      String s = "SELECT COLUMN_NAME FROM " + (ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS")
+      StringBuilder bldr = new StringBuilder();
+      bldr.append("SELECT ").append(quote).append("COLUMN_NAME").append(quote)
+          .append(" FROM ")
+          .append(quote).append((ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS"))
+              .append(quote)
+          .append(" WHERE ")
+          .append(quote).append("DB_NAME").append(quote).append(" = '").append(ci.dbname)
+              .append("' AND ").append(quote).append("TABLE_NAME").append(quote)
+              .append(" = '").append(ci.tableName).append("'");
+      if (ci.partName != null) {
+        bldr.append(" AND ").append(quote).append("PARTITION_NAME").append(quote).append(" = '")
+            .append(ci.partName).append("'");
+      }
+      String s = bldr.toString();
+
-        + " WHERE DB_NAME='" + ci.dbname + "' AND TABLE_NAME='" + ci.tableName + "'"
-        + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");
       LOG.debug("Going to execute <" + s + ">");
       rs = stmt.executeQuery(s);
       List<String> columns = new ArrayList<String>();

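getIdentifierQuoteString() exists because the quote character is database-specific: per the JDBC DatabaseMetaData contract it is a backtick on MySQL, a double quote on most other databases, and a single space when quoting is unsupported. A condensed sketch of the quoting pattern used in the CompactionTxnHandler hunk above (the method name is hypothetical):

    static String columnStatsQuery(Connection conn, String dbName, String tableName)
        throws SQLException {
      String q = conn.getMetaData().getIdentifierQuoteString();  // "`" on MySQL, "\"" elsewhere
      return "SELECT " + q + "COLUMN_NAME" + q
          + " FROM " + q + "TAB_COL_STATS" + q
          + " WHERE " + q + "DB_NAME" + q + " = '" + dbName + "'"
          + " AND " + q + "TABLE_NAME" + q + " = '" + tableName + "'";
    }

Note that, as in the committed code, the values themselves are still inlined into the SQL; a PreparedStatement with bind variables would sidestep both value quoting and the injection risk.
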
Modified: hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java?rev=1635536&r1=1635535&r2=1635536&view=diff
==============================================================================
--- hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java (original)
+++ hive/branches/spark/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java Thu Oct 30 16:22:33 2014
@@ -90,6 +90,8 @@ public class TxnHandler {
   // Transaction timeout, in milliseconds.
   private long timeout;
 
+  private String identifierQuoteString; // quotes to use for quoting tables, where necessary
+
   // DEADLOCK DETECTION AND HANDLING
   // A note to developers of this class.  ALWAYS access HIVE_LOCKS before TXNS to avoid deadlock
   // between simultaneous accesses.  ALWAYS access TXN_COMPONENTS before HIVE_LOCKS .
@@ -960,6 +962,19 @@ public class TxnHandler {
     }
   }
 
+  /**
+   * Determine the String that should be used to quote identifiers.
+   * @param conn Active connection
+   * @return the identifier quote string reported by the connection's metadata
+   * @throws SQLException
+   */
+  protected String getIdentifierQuoteString(Connection conn) throws SQLException {
+    if (identifierQuoteString == null) {
+      identifierQuoteString = conn.getMetaData().getIdentifierQuoteString();
+    }
+    return identifierQuoteString;
+  }
+
   protected enum DatabaseProduct { DERBY, MYSQL, POSTGRES, ORACLE, SQLSERVER}
 
   /**