Posted to commits@hive.apache.org by pr...@apache.org on 2014/10/14 21:07:05 UTC

svn commit: r1631841 [6/42] - in /hive/branches/llap: ./ accumulo-handler/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/ accumulo-handler/src/java/org/apache/hadoop/hive...

Modified: hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java (original)
+++ hive/branches/llap/jdbc/src/java/org/apache/hive/jdbc/Utils.java Tue Oct 14 19:06:45 2014
@@ -19,17 +19,24 @@
 package org.apache.hive.jdbc;
 
 import java.net.URI;
+import java.net.URISyntaxException;
 import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.thrift.TStatus;
 import org.apache.hive.service.cli.thrift.TStatusCode;
 
 public class Utils {
+  public static final Log LOG = LogFactory.getLog(Utils.class.getName());
   /**
     * The required prefix for the connection URL.
     */
@@ -47,14 +54,69 @@ public class Utils {
 
   private static final String URI_JDBC_PREFIX = "jdbc:";
 
+  private static final String URI_HIVE_PREFIX = "hive2:";
+
   public static class JdbcConnectionParams {
+    // Note on client side parameter naming convention:
+    // Prefer using a shorter camelCase param name instead of using the same name as the
+    // corresponding HiveServer2 config.
+    // For a jdbc url: jdbc:hive2://<host>:<port>/dbName;sess_var_list?hive_conf_list#hive_var_list,
+    // client side params are specified in sess_var_list
+
+    // Client param names:
+    static final String AUTH_TYPE = "auth";
+    // We're deprecating this variable's name.
+    static final String AUTH_QOP_DEPRECATED = "sasl.qop";
+    static final String AUTH_QOP = "saslQop";
+    static final String AUTH_SIMPLE = "noSasl";
+    static final String AUTH_TOKEN = "delegationToken";
+    static final String AUTH_USER = "user";
+    static final String AUTH_PRINCIPAL = "principal";
+    static final String AUTH_PASSWD = "password";
+    static final String AUTH_KERBEROS_AUTH_TYPE = "kerberosAuthType";
+    static final String AUTH_KERBEROS_AUTH_TYPE_FROM_SUBJECT = "fromSubject";
+    static final String ANONYMOUS_USER = "anonymous";
+    static final String ANONYMOUS_PASSWD = "anonymous";
+    static final String USE_SSL = "ssl";
+    static final String SSL_TRUST_STORE = "sslTrustStore";
+    static final String SSL_TRUST_STORE_PASSWORD = "trustStorePassword";
+    // We're deprecating the name and placement of this in the parsed map (from hive conf vars to
+    // hive session vars).
+    static final String TRANSPORT_MODE_DEPRECATED = "hive.server2.transport.mode";
+    static final String TRANSPORT_MODE = "transportMode";
+    // We're deprecating the name and placement of this in the parsed map (from hive conf vars to
+    // hive session vars).
+    static final String HTTP_PATH_DEPRECATED = "hive.server2.thrift.http.path";
+    static final String HTTP_PATH = "httpPath";
+    static final String SERVICE_DISCOVERY_MODE = "serviceDiscoveryMode";
+    // Don't use dynamic service discovery
+    static final String SERVICE_DISCOVERY_MODE_NONE = "none";
+    // Use ZooKeeper for indirection while using dynamic service discovery
+    static final String SERVICE_DISCOVERY_MODE_ZOOKEEPER = "zooKeeper";
+    static final String ZOOKEEPER_NAMESPACE = "zooKeeperNamespace";
+    // Default namespace value on ZooKeeper.
+    // This value is used if the param "zooKeeperNamespace" is not specified in the JDBC Uri.
+    static final String ZOOKEEPER_DEFAULT_NAMESPACE = "hiveserver2";
+
+    // Non-configurable params:
+    // ZOOKEEPER_SESSION_TIMEOUT is not exposed as client configurable
+    static final int ZOOKEEPER_SESSION_TIMEOUT = 600 * 1000;
+    // Currently supports JKS keystore format
+    static final String SSL_TRUST_STORE_TYPE = "JKS";
+
     private String host = null;
     private int port;
+    private String jdbcUriString;
     private String dbName = DEFAULT_DATABASE;
     private Map<String,String> hiveConfs = new LinkedHashMap<String,String>();
     private Map<String,String> hiveVars = new LinkedHashMap<String,String>();
     private Map<String,String> sessionVars = new LinkedHashMap<String,String>();
     private boolean isEmbeddedMode = false;
+    private String[] authorityList;
+    private String zooKeeperEnsemble = null;
+    private String currentHostZnodePath;
+    private List<String> rejectedHostZnodePaths = new ArrayList<String>();
 
     public JdbcConnectionParams() {
     }
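
The client params above land in the sess_var_list portion of the connection URL. A minimal usage sketch, assuming a reachable HiveServer2 and the Hive JDBC driver on the classpath (host names, ports, and credentials below are hypothetical):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class HiveJdbcUrlExample {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");

        // Direct connection: "user"/"password" are the AUTH_USER/AUTH_PASSWD client params.
        Connection direct = DriverManager.getConnection(
            "jdbc:hive2://host1:10000/default;user=foo;password=bar");

        // Service discovery: the authority lists a ZooKeeper ensemble, and
        // serviceDiscoveryMode/zooKeeperNamespace pick a HiveServer2 instance from it.
        Connection discovered = DriverManager.getConnection(
            "jdbc:hive2://zk1:2181,zk2:2181,zk3:2181/default;"
            + "serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2");

        direct.close();
        discovered.close();
      }
    }
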
@@ -62,46 +124,94 @@ public class Utils {
     public String getHost() {
       return host;
     }
+
     public int getPort() {
       return port;
     }
+
+    public String getJdbcUriString() {
+      return jdbcUriString;
+    }
+
     public String getDbName() {
       return dbName;
     }
+
     public Map<String, String> getHiveConfs() {
       return hiveConfs;
     }
-    public Map<String,String> getHiveVars() {
+
+    public Map<String, String> getHiveVars() {
       return hiveVars;
     }
+
     public boolean isEmbeddedMode() {
       return isEmbeddedMode;
     }
+
     public Map<String, String> getSessionVars() {
       return sessionVars;
     }
 
+    public String[] getAuthorityList() {
+      return authorityList;
+    }
+
+    public String getZooKeeperEnsemble() {
+      return zooKeeperEnsemble;
+    }
+
+    public List<String> getRejectedHostZnodePaths() {
+      return rejectedHostZnodePaths;
+    }
+
+    public String getCurrentHostZnodePath() {
+      return currentHostZnodePath;
+    }
+
     public void setHost(String host) {
       this.host = host;
     }
+
     public void setPort(int port) {
       this.port = port;
     }
+
+    public void setJdbcUriString(String jdbcUriString) {
+      this.jdbcUriString = jdbcUriString;
+    }
+
     public void setDbName(String dbName) {
       this.dbName = dbName;
     }
+
     public void setHiveConfs(Map<String, String> hiveConfs) {
       this.hiveConfs = hiveConfs;
     }
-    public void setHiveVars(Map<String,String> hiveVars) {
+
+    public void setHiveVars(Map<String, String> hiveVars) {
       this.hiveVars = hiveVars;
     }
+
     public void setEmbeddedMode(boolean embeddedMode) {
       this.isEmbeddedMode = embeddedMode;
     }
+
     public void setSessionVars(Map<String, String> sessionVars) {
       this.sessionVars = sessionVars;
     }
+
+    public void setSuppliedAuthorityList(String[] authorityList) {
+      this.authorityList = authorityList;
+    }
+
+    public void setZooKeeperEnsemble(String zooKeeperEnsemble) {
+      this.zooKeeperEnsemble = zooKeeperEnsemble;
+    }
+
+    public void setCurrentHostZnodePath(String currentHostZnodePath) {
+      this.currentHostZnodePath = currentHostZnodePath;
+    }
   }
 
   // Verify success or success_with_info status, else throw SQLException
@@ -124,27 +234,33 @@ public class Utils {
 
   /**
    * Parse JDBC connection URL
-   * The new format of the URL is jdbc:hive2://<host>:<port>/dbName;sess_var_list?hive_conf_list#hive_var_list
-   * where the optional sess, conf and var lists are semicolon separated <key>=<val> pairs. As before, if the
-   * host/port is not specified, it the driver runs an embedded hive.
+   * The new format of the URL is:
+   * jdbc:hive2://<host1>:<port1>,<host2>:<port2>/dbName;sess_var_list?hive_conf_list#hive_var_list
+   * where the optional sess, conf and var lists are semicolon separated <key>=<val> pairs.
+   * For utilizing dynamic service discovery with HiveServer2, multiple comma-separated
+   * host:port pairs can be specified as shown above.
+   * The JDBC driver resolves the list of uris and picks a specific server instance to connect to.
+   * Currently, dynamic service discovery using ZooKeeper is supported, in which case the host:port pairs represent a ZooKeeper ensemble.
+   *
+   * As before, if the host/port is not specified, the driver runs an embedded hive.
    * examples -
    *  jdbc:hive2://ubuntu:11000/db2?hive.cli.conf.printheader=true;hive.exec.mode.local.auto.inputbytes.max=9999#stab=salesTable;icol=customerID
    *  jdbc:hive2://?hive.cli.conf.printheader=true;hive.exec.mode.local.auto.inputbytes.max=9999#stab=salesTable;icol=customerID
    *  jdbc:hive2://ubuntu:11000/db2;user=foo;password=bar
    *
    *  Connect to http://server:10001/hs2, with specified basicAuth credentials and initial database:
-   *     jdbc:hive2://server:10001/db;user=foo;password=bar?hive.server2.transport.mode=http;hive.server2.thrift.http.path=hs2
-   *
-   * Note that currently the session properties are not used.
+   *  jdbc:hive2://server:10001/db;user=foo;password=bar?hive.server2.transport.mode=http;hive.server2.thrift.http.path=hs2
    *
    * @param uri
    * @return
+   * @throws SQLException
    */
-  public static JdbcConnectionParams parseURL(String uri) throws IllegalArgumentException {
+  public static JdbcConnectionParams parseURL(String uri) throws JdbcUriParseException,
+      SQLException, ZooKeeperHiveClientException {
     JdbcConnectionParams connParams = new JdbcConnectionParams();
 
     if (!uri.startsWith(URL_PREFIX)) {
-      throw new IllegalArgumentException("Bad URL format: Missing prefix " + URL_PREFIX);
+      throw new JdbcUriParseException("Bad URL format: Missing prefix " + URL_PREFIX);
     }
 
     // For URLs with no other configuration
@@ -154,32 +270,35 @@ public class Utils {
       return connParams;
     }
 
-    URI jdbcURI = URI.create(uri.substring(URI_JDBC_PREFIX.length()));
-
-    // Check to prevent unintentional use of embedded mode. A missing "/"
-    // to separate the 'path' portion of URI can result in this.
-    // The missing "/" common typo while using secure mode, eg of such url -
-    // jdbc:hive2://localhost:10000;principal=hive/HiveServer2Host@YOUR-REALM.COM
-    if((jdbcURI.getAuthority() != null) && (jdbcURI.getHost()==null)) {
-       throw new IllegalArgumentException("Bad URL format. Hostname not found "
-           + " in authority part of the url: " + jdbcURI.getAuthority()
-           + ". Are you missing a '/' after the hostname ?");
-    }
-
-    connParams.setHost(jdbcURI.getHost());
-    if (connParams.getHost() == null) {
+    // The JDBC URI now supports specifying multiple host:port if dynamic service discovery is
+    // configured on HiveServer2 (like: host1:port1,host2:port2,host3:port3)
+    // We'll extract the authorities (host:port combo) from the URI, extract session vars, hive
+    // confs & hive vars by parsing it as a Java URI.
+    // To parse the intermediate URI as a Java URI, we'll give it a dummy authority (dummyhost:00000).
+    // Later, we'll replace the dummy authority with a resolved one.
+    String dummyAuthorityString = "dummyhost:00000";
+    String suppliedAuthorities = getAuthorities(uri, connParams);
+    if ((suppliedAuthorities == null) || (suppliedAuthorities.isEmpty())) {
+      // Given uri of the form:
+      // jdbc:hive2:///dbName;sess_var_list?hive_conf_list#hive_var_list
       connParams.setEmbeddedMode(true);
     } else {
-      int port = jdbcURI.getPort();
-      if (port == -1) {
-        port = Integer.valueOf(DEFAULT_PORT);
-      }
-      connParams.setPort(port);
+      LOG.info("Supplied authorities: " + suppliedAuthorities);
+      String[] authorityList = suppliedAuthorities.split(",");
+      connParams.setSuppliedAuthorityList(authorityList);
+      uri = uri.replace(suppliedAuthorities, dummyAuthorityString);
     }
 
+    // Now parse the connection uri with dummy authority
+    URI jdbcURI = URI.create(uri.substring(URI_JDBC_PREFIX.length()));
+
     // key=value pattern
     Pattern pattern = Pattern.compile("([^;]*)=([^;]*)[;]?");
 
+    Map<String, String> sessionVarMap = connParams.getSessionVars();
+    Map<String, String> hiveConfMap = connParams.getHiveConfs();
+    Map<String, String> hiveVarMap = connParams.getHiveVars();
+
     // dbname and session settings
     String sessVars = jdbcURI.getPath();
     if ((sessVars != null) && !sessVars.isEmpty()) {
@@ -192,12 +311,13 @@ public class Utils {
       } else {
         // we have dbname followed by session parameters
         dbName = sessVars.substring(0, sessVars.indexOf(';'));
-        sessVars = sessVars.substring(sessVars.indexOf(';')+1);
+        sessVars = sessVars.substring(sessVars.indexOf(';') + 1);
         if (sessVars != null) {
           Matcher sessMatcher = pattern.matcher(sessVars);
           while (sessMatcher.find()) {
-            if (connParams.getSessionVars().put(sessMatcher.group(1), sessMatcher.group(2)) != null) {
-              throw new IllegalArgumentException("Bad URL format: Multiple values for property " + sessMatcher.group(1));
+            if (sessionVarMap.put(sessMatcher.group(1), sessMatcher.group(2)) != null) {
+              throw new JdbcUriParseException("Bad URL format: Multiple values for property "
+                  + sessMatcher.group(1));
             }
           }
         }
@@ -212,7 +332,7 @@ public class Utils {
     if (confStr != null) {
       Matcher confMatcher = pattern.matcher(confStr);
       while (confMatcher.find()) {
-        connParams.getHiveConfs().put(confMatcher.group(1), confMatcher.group(2));
+        hiveConfMap.put(confMatcher.group(1), confMatcher.group(2));
       }
     }
 
@@ -221,14 +341,204 @@ public class Utils {
     if (varStr != null) {
       Matcher varMatcher = pattern.matcher(varStr);
       while (varMatcher.find()) {
-        connParams.getHiveVars().put(varMatcher.group(1), varMatcher.group(2));
+        hiveVarMap.put(varMatcher.group(1), varMatcher.group(2));
       }
     }
 
+    // Handle all deprecations here:
+    String newUsage;
+    String usageUrlBase = "jdbc:hive2://<host>:<port>/dbName;";
+    // Handle deprecation of AUTH_QOP_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.AUTH_QOP + "=<qop_value>";
+    handleParamDeprecation(sessionVarMap, sessionVarMap, JdbcConnectionParams.AUTH_QOP_DEPRECATED,
+        JdbcConnectionParams.AUTH_QOP, newUsage);
+
+    // Handle deprecation of TRANSPORT_MODE_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.TRANSPORT_MODE + "=<transport_mode_value>";
+    handleParamDeprecation(hiveConfMap, sessionVarMap,
+        JdbcConnectionParams.TRANSPORT_MODE_DEPRECATED, JdbcConnectionParams.TRANSPORT_MODE,
+        newUsage);
+
+    // Handle deprecation of HTTP_PATH_DEPRECATED
+    newUsage = usageUrlBase + JdbcConnectionParams.HTTP_PATH + "=<http_path_value>";
+    handleParamDeprecation(hiveConfMap, sessionVarMap, JdbcConnectionParams.HTTP_PATH_DEPRECATED,
+        JdbcConnectionParams.HTTP_PATH, newUsage);
+
+    // Extract host, port
+    if (connParams.isEmbeddedMode()) {
+      // In case of embedded mode we were supplied with an empty authority.
+      // So we never substituted the authority with a dummy one.
+      connParams.setHost(jdbcURI.getHost());
+      connParams.setPort(jdbcURI.getPort());
+    } else {
+      // Else substitute the dummy authority with a resolved one.
+      // In case of dynamic service discovery using ZooKeeper, it picks a server uri from ZooKeeper
+      String resolvedAuthorityString = resolveAuthority(connParams);
+      uri = uri.replace(dummyAuthorityString, resolvedAuthorityString);
+      connParams.setJdbcUriString(uri);
+      // Create a Java URI from the resolved URI for extracting the host/port
+      URI resolvedAuthorityURI = null;
+      try {
+        resolvedAuthorityURI = new URI(null, resolvedAuthorityString, null, null, null);
+      } catch (URISyntaxException e) {
+        throw new JdbcUriParseException("Bad URL format: ", e);
+      }
+      connParams.setHost(resolvedAuthorityURI.getHost());
+      connParams.setPort(resolvedAuthorityURI.getPort());
+    }
+
     return connParams;
   }
 
   /**
+   * Remove the deprecatedName param from the fromMap and put its value under newName in the
+   * toMap. Also log a deprecation message for the client.
+   * @param fromMap
+   * @param toMap
+   * @param deprecatedName
+   * @param newName
+   * @param newUsage
+   */
+  private static void handleParamDeprecation(Map<String, String> fromMap, Map<String, String> toMap,
+      String deprecatedName, String newName, String newUsage) {
+    if (fromMap.containsKey(deprecatedName)) {
+      LOG.warn("***** JDBC param deprecation *****");
+      LOG.warn("The use of " + deprecatedName + " is deprecated.");
+      LOG.warn("Please use " + newName + " like so: " + newUsage);
+      String paramValue = fromMap.remove(deprecatedName);
+      toMap.put(newName, paramValue);
+    }
+  }
+
+  /**
+   * Get the authority string from the supplied uri, which could potentially contain multiple
+   * host:port pairs.
+   *
+   * @param uri
+   * @param connParams
+   * @return
+   * @throws JdbcUriParseException
+   */
+  private static String getAuthorities(String uri, JdbcConnectionParams connParams)
+      throws JdbcUriParseException {
+    String authorities;
+    /**
+     * For a jdbc uri like:
+     * jdbc:hive2://<host1>:<port1>,<host2>:<port2>/dbName;sess_var_list?conf_list#var_list
+     * Extract the uri host:port list starting after "jdbc:hive2://",
+     * up to the first "/", "?" or "#" found, checked in that order.
+     * Examples:
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3/db;k1=v1?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3/;k1=v1?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3?k2=v2#k3=v3
+     * jdbc:hive2://host1:port1,host2:port2,host3:port3#k3=v3
+     */
+    int fromIndex = Utils.URL_PREFIX.length();
+    int toIndex = -1;
+    ArrayList<String> toIndexChars = new ArrayList<String>(Arrays.asList("/", "?", "#"));
+    for (String toIndexChar : toIndexChars) {
+      toIndex = uri.indexOf(toIndexChar, fromIndex);
+      if (toIndex > 0) {
+        break;
+      }
+    }
+    if (toIndex < 0) {
+      authorities = uri.substring(fromIndex);
+    } else {
+      authorities = uri.substring(fromIndex, toIndex);
+    }
+    return authorities;
+  }
+
+  /**
+   * Get a string representing a specific host:port
+   * @param connParams
+   * @return
+   * @throws JdbcUriParseException
+   * @throws ZooKeeperHiveClientException
+   */
+  private static String resolveAuthority(JdbcConnectionParams connParams)
+      throws JdbcUriParseException, ZooKeeperHiveClientException {
+    String serviceDiscoveryMode =
+        connParams.getSessionVars().get(JdbcConnectionParams.SERVICE_DISCOVERY_MODE);
+    if ((serviceDiscoveryMode != null)
+        && (JdbcConnectionParams.SERVICE_DISCOVERY_MODE_ZOOKEEPER
+            .equalsIgnoreCase(serviceDiscoveryMode))) {
+      // Resolve using ZooKeeper
+      return resolveAuthorityUsingZooKeeper(connParams);
+    } else {
+      String authority = connParams.getAuthorityList()[0];
+      URI jdbcURI = URI.create(URI_HIVE_PREFIX + "//" + authority);
+      // Check to prevent unintentional use of embedded mode. A missing "/"
+      // to separate the 'path' portion of URI can result in this.
+      // A missing "/" is a common typo when using secure mode, e.g. in a url such as -
+      // jdbc:hive2://localhost:10000;principal=hive/HiveServer2Host@YOUR-REALM.COM
+      if ((jdbcURI.getAuthority() != null) && (jdbcURI.getHost() == null)) {
+        throw new JdbcUriParseException("Bad URL format. Hostname not found"
+            + " in authority part of the url: " + jdbcURI.getAuthority()
+            + ". Are you missing a '/' after the hostname ?");
+      }
+      // Return the authority parsed from the 1st element of the list
+      return jdbcURI.getAuthority();
+    }
+  }
+
+  /**
+   * Read a specific host:port from ZooKeeper
+   * @param connParams
+   * @return
+   * @throws ZooKeeperHiveClientException
+   */
+  private static String resolveAuthorityUsingZooKeeper(JdbcConnectionParams connParams)
+      throws ZooKeeperHiveClientException {
+    // Set ZooKeeper ensemble in connParams for later use
+    connParams.setZooKeeperEnsemble(joinStringArray(connParams.getAuthorityList(), ","));
+    return ZooKeeperHiveClientHelper.getNextServerUriFromZooKeeper(connParams);
+  }
+
+  /**
+   * Read the next server coordinates (host:port combo) from ZooKeeper. Ignore the znodes already
+   * explored. Also update the host, port, jdbcUriString fields of connParams.
+   *
+   * @param connParams
+   * @throws ZooKeeperHiveClientException
+   */
+  static void updateConnParamsFromZooKeeper(JdbcConnectionParams connParams)
+      throws ZooKeeperHiveClientException {
+    // Add current host to the rejected list
+    connParams.getRejectedHostZnodePaths().add(connParams.getCurrentHostZnodePath());
+    // Get another HiveServer2 uri from ZooKeeper
+    String serverUriString = ZooKeeperHiveClientHelper.getNextServerUriFromZooKeeper(connParams);
+    // Parse serverUri to a java URI and extract host, port
+    URI serverUri = null;
+    try {
+      // Note: URL_PREFIX is not a valid URI scheme, so the scheme is left null in the
+      // constructor to construct a valid URI
+      serverUri = new URI(null, serverUriString, null, null, null);
+    } catch (URISyntaxException e) {
+      throw new ZooKeeperHiveClientException(e);
+    }
+    String oldServerHost = connParams.getHost();
+    int oldServerPort = connParams.getPort();
+    String newServerHost = serverUri.getHost();
+    int newServerPort = serverUri.getPort();
+    connParams.setHost(newServerHost);
+    connParams.setPort(newServerPort);
+    connParams.setJdbcUriString(connParams.getJdbcUriString().replace(
+        oldServerHost + ":" + oldServerPort, newServerHost + ":" + newServerPort));
+  }
+
+  private static String joinStringArray(String[] stringArray, String separator) {
+    StringBuilder stringBuilder = new StringBuilder();
+    for (int cur = 0, end = stringArray.length; cur < end; cur++) {
+      if (cur > 0) {
+        stringBuilder.append(separator);
+      }
+      stringBuilder.append(stringArray[cur]);
+    }
+    return stringBuilder.toString();
+  }
+
+  /**
    * Takes a version string delimited by '.' and '-' characters
    * and returns a partial version.
    *

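A side note on the dummy-authority trick in parseURL above: java.net.URI cannot server-parse an authority that contains a comma, which is exactly what a multi-host uri supplies. A self-contained sketch of that behavior (plain JDK, no Hive classes):

    import java.net.URI;

    public class DummyAuthorityDemo {
      public static void main(String[] args) {
        // The comma forces registry-based parsing: no host/port are extracted.
        URI multi = URI.create("hive2://host1:10000,host2:10000/db;k=v");
        System.out.println(multi.getHost());      // null
        System.out.println(multi.getAuthority()); // host1:10000,host2:10000

        // Swapping in a single dummy authority restores normal parsing of the
        // path, query and fragment, which is all parseURL needs at that stage.
        URI dummy = URI.create("hive2://dummyhost:00000/db;k=v");
        System.out.println(dummy.getHost()); // dummyhost
        System.out.println(dummy.getPath()); // /db;k=v
      }
    }
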
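And the failover bookkeeping (rejectedHostZnodePaths plus updateConnParamsFromZooKeeper) amounts to: remember which znodes were already tried, and ask ZooKeeper for one that was not. A stripped-down sketch of that selection logic; this is an illustrative stand-in, not ZooKeeperHiveClientHelper itself:

    import java.util.ArrayList;
    import java.util.List;

    public class NextServerPicker {
      // Return the first candidate not yet rejected; candidates stand in for the
      // HiveServer2 uris registered under the ZooKeeper namespace.
      static String pickNext(List<String> candidates, List<String> rejected) {
        for (String candidate : candidates) {
          if (!rejected.contains(candidate)) {
            return candidate;
          }
        }
        throw new IllegalStateException("Tried all existing HiveServer2 uris from ZooKeeper.");
      }

      public static void main(String[] args) {
        List<String> servers = new ArrayList<String>();
        servers.add("host1:10000");
        servers.add("host2:10000");
        List<String> rejected = new ArrayList<String>();
        rejected.add("host1:10000"); // a previous connection attempt failed
        System.out.println(pickNext(servers, rejected)); // host2:10000
      }
    }
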
Modified: hive/branches/llap/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql Tue Oct 14 19:06:45 2014
@@ -1,6 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE;
 
 :r 002-HIVE-7784.mssql.sql;
+:r 003-HIVE-8239.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE;

Modified: hive/branches/llap/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql (original)
+++ hive/branches/llap/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql Tue Oct 14 19:06:45 2014
@@ -1,5 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual;
 
+@019-HIVE-7118.oracle.sql;
 @020-HIVE-7784.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1;

Modified: hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java (original)
+++ hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java Tue Oct 14 19:06:45 2014
@@ -135,9 +135,9 @@ public class FieldSchema implements org.
     String comment)
   {
     this();
-    this.name = name;
-    this.type = type;
-    this.comment = comment;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
+    this.type = org.apache.hive.common.util.HiveStringUtils.intern(type);
+    this.comment = org.apache.hive.common.util.HiveStringUtils.intern(comment);
   }
 
   /**
@@ -145,13 +145,13 @@ public class FieldSchema implements org.
    */
   public FieldSchema(FieldSchema other) {
     if (other.isSetName()) {
-      this.name = other.name;
+      this.name = org.apache.hive.common.util.HiveStringUtils.intern(other.name);
     }
     if (other.isSetType()) {
-      this.type = other.type;
+      this.type = org.apache.hive.common.util.HiveStringUtils.intern(other.type);
     }
     if (other.isSetComment()) {
-      this.comment = other.comment;
+      this.comment = org.apache.hive.common.util.HiveStringUtils.intern(other.comment);
     }
   }
 
@@ -171,7 +171,7 @@ public class FieldSchema implements org.
   }
 
   public void setName(String name) {
-    this.name = name;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
   }
 
   public void unsetName() {
@@ -194,7 +194,7 @@ public class FieldSchema implements org.
   }
 
   public void setType(String type) {
-    this.type = type;
+    this.type = org.apache.hive.common.util.HiveStringUtils.intern(type);
   }
 
   public void unsetType() {
@@ -217,7 +217,7 @@ public class FieldSchema implements org.
   }
 
   public void setComment(String comment) {
-    this.comment = comment;
+    this.comment = org.apache.hive.common.util.HiveStringUtils.intern(comment);
   }
 
   public void unsetComment() {

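The HiveStringUtils.intern calls introduced above deduplicate metadata strings (type names, comments, parameter keys/values) that are repeated across many FieldSchema and Partition objects in a busy metastore. A minimal sketch of the idea using the JDK's String.intern; this stand-in only illustrates the shape of the helper, not its actual implementation:

    public class InternSketch {
      // Null-safe intern, mirroring how the setters above can receive null comments.
      static String intern(String s) {
        return (s == null) ? null : s.intern();
      }

      public static void main(String[] args) {
        String a = new String("bigint"); // distinct heap object
        String b = new String("bigint"); // another distinct heap object
        System.out.println(a == b);                  // false: two copies
        System.out.println(intern(a) == intern(b));  // true: one shared instance
      }
    }
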
Modified: hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java (original)
+++ hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java Tue Oct 14 19:06:45 2014
@@ -182,14 +182,14 @@ public class Partition implements org.ap
   {
     this();
     this.values = values;
-    this.dbName = dbName;
-    this.tableName = tableName;
+    this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(dbName);
+    this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(tableName);
     this.createTime = createTime;
     setCreateTimeIsSet(true);
     this.lastAccessTime = lastAccessTime;
     setLastAccessTimeIsSet(true);
     this.sd = sd;
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -205,10 +205,10 @@ public class Partition implements org.ap
       this.values = __this__values;
     }
     if (other.isSetDbName()) {
-      this.dbName = other.dbName;
+      this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(other.dbName);
     }
     if (other.isSetTableName()) {
-      this.tableName = other.tableName;
+      this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(other.tableName);
     }
     this.createTime = other.createTime;
     this.lastAccessTime = other.lastAccessTime;
@@ -222,9 +222,9 @@ public class Partition implements org.ap
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -296,7 +296,7 @@ public class Partition implements org.ap
   }
 
   public void setDbName(String dbName) {
-    this.dbName = dbName;
+    this.dbName = org.apache.hive.common.util.HiveStringUtils.intern(dbName);
   }
 
   public void unsetDbName() {
@@ -319,7 +319,7 @@ public class Partition implements org.ap
   }
 
   public void setTableName(String tableName) {
-    this.tableName = tableName;
+    this.tableName = org.apache.hive.common.util.HiveStringUtils.intern(tableName);
   }
 
   public void unsetTableName() {
@@ -420,7 +420,7 @@ public class Partition implements org.ap
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java (original)
+++ hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java Tue Oct 14 19:06:45 2014
@@ -137,9 +137,9 @@ public class SerDeInfo implements org.ap
     Map<String,String> parameters)
   {
     this();
-    this.name = name;
-    this.serializationLib = serializationLib;
-    this.parameters = parameters;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
+    this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(serializationLib);
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -147,10 +147,10 @@ public class SerDeInfo implements org.ap
    */
   public SerDeInfo(SerDeInfo other) {
     if (other.isSetName()) {
-      this.name = other.name;
+      this.name = org.apache.hive.common.util.HiveStringUtils.intern(other.name);
     }
     if (other.isSetSerializationLib()) {
-      this.serializationLib = other.serializationLib;
+      this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(other.serializationLib);
     }
     if (other.isSetParameters()) {
       Map<String,String> __this__parameters = new HashMap<String,String>();
@@ -159,9 +159,9 @@ public class SerDeInfo implements org.ap
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -185,7 +185,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setName(String name) {
-    this.name = name;
+    this.name = org.apache.hive.common.util.HiveStringUtils.intern(name);
   }
 
   public void unsetName() {
@@ -208,7 +208,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setSerializationLib(String serializationLib) {
-    this.serializationLib = serializationLib;
+    this.serializationLib = org.apache.hive.common.util.HiveStringUtils.intern(serializationLib);
   }
 
   public void unsetSerializationLib() {
@@ -242,7 +242,7 @@ public class SerDeInfo implements org.ap
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java (original)
+++ hive/branches/llap/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java Tue Oct 14 19:06:45 2014
@@ -216,17 +216,17 @@ public class StorageDescriptor implement
   {
     this();
     this.cols = cols;
-    this.location = location;
-    this.inputFormat = inputFormat;
-    this.outputFormat = outputFormat;
+    this.location = org.apache.hive.common.util.HiveStringUtils.intern(location);
+    this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(inputFormat);
+    this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(outputFormat);
     this.compressed = compressed;
     setCompressedIsSet(true);
     this.numBuckets = numBuckets;
     setNumBucketsIsSet(true);
     this.serdeInfo = serdeInfo;
-    this.bucketCols = bucketCols;
+    this.bucketCols = org.apache.hive.common.util.HiveStringUtils.intern(bucketCols);
     this.sortCols = sortCols;
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   /**
@@ -242,13 +242,13 @@ public class StorageDescriptor implement
       this.cols = __this__cols;
     }
     if (other.isSetLocation()) {
-      this.location = other.location;
+      this.location = org.apache.hive.common.util.HiveStringUtils.intern(other.location);
     }
     if (other.isSetInputFormat()) {
-      this.inputFormat = other.inputFormat;
+      this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(other.inputFormat);
     }
     if (other.isSetOutputFormat()) {
-      this.outputFormat = other.outputFormat;
+      this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(other.outputFormat);
     }
     this.compressed = other.compressed;
     this.numBuckets = other.numBuckets;
@@ -276,9 +276,9 @@ public class StorageDescriptor implement
         String other_element_key = other_element.getKey();
         String other_element_value = other_element.getValue();
 
-        String __this__parameters_copy_key = other_element_key;
+        String __this__parameters_copy_key = org.apache.hive.common.util.HiveStringUtils.intern(other_element_key);
 
-        String __this__parameters_copy_value = other_element_value;
+        String __this__parameters_copy_value = org.apache.hive.common.util.HiveStringUtils.intern(other_element_value);
 
         __this__parameters.put(__this__parameters_copy_key, __this__parameters_copy_value);
       }
@@ -356,7 +356,7 @@ public class StorageDescriptor implement
   }
 
   public void setLocation(String location) {
-    this.location = location;
+    this.location = org.apache.hive.common.util.HiveStringUtils.intern(location);
   }
 
   public void unsetLocation() {
@@ -379,7 +379,7 @@ public class StorageDescriptor implement
   }
 
   public void setInputFormat(String inputFormat) {
-    this.inputFormat = inputFormat;
+    this.inputFormat = org.apache.hive.common.util.HiveStringUtils.intern(inputFormat);
   }
 
   public void unsetInputFormat() {
@@ -402,7 +402,7 @@ public class StorageDescriptor implement
   }
 
   public void setOutputFormat(String outputFormat) {
-    this.outputFormat = outputFormat;
+    this.outputFormat = org.apache.hive.common.util.HiveStringUtils.intern(outputFormat);
   }
 
   public void unsetOutputFormat() {
@@ -507,7 +507,7 @@ public class StorageDescriptor implement
   }
 
   public void setBucketCols(List<String> bucketCols) {
-    this.bucketCols = bucketCols;
+    this.bucketCols = org.apache.hive.common.util.HiveStringUtils.intern(bucketCols);
   }
 
   public void unsetBucketCols() {
@@ -579,7 +579,7 @@ public class StorageDescriptor implement
   }
 
   public void setParameters(Map<String,String> parameters) {
-    this.parameters = parameters;
+    this.parameters = org.apache.hive.common.util.HiveStringUtils.intern(parameters);
   }
 
   public void unsetParameters() {

Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Tue Oct 14 19:06:45 2014
@@ -48,9 +48,8 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableListMultimap;
-import com.google.common.collect.Multimaps;
+import javax.jdo.JDOException;
+
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -171,6 +170,8 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
 import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
 import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
 import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
@@ -203,7 +204,10 @@ import org.apache.thrift.transport.TTran
 import com.facebook.fb303.FacebookBase;
 import com.facebook.fb303.fb_status;
 import com.google.common.base.Splitter;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Multimaps;
 
 /**
  * TODO:pc remove application logic to a separate interface.
@@ -568,22 +572,50 @@ public class HiveMetaStore extends Thrif
     }
 
     /**
-     * create default database if it doesn't exist
+     * create default database if it doesn't exist.
+     *
+     * There is a potential contention when a HiveServer2 using an embedded metastore and a
+     * Metastore Server try to invoke createDefaultDB concurrently. If one fails, the
+     * JDOException is caught and the call retried once; if it fails again, the failure is
+     * simply logged as a warning, which means the other invocation succeeded.
      *
      * @throws MetaException
      */
     private void createDefaultDB() throws MetaException {
       try {
         createDefaultDB_core(getMS());
+      } catch (JDOException e) {
+        LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+        try {
+          createDefaultDB_core(getMS());
+        } catch (InvalidObjectException e1) {
+          throw new MetaException(e1.getMessage());
+        }
       } catch (InvalidObjectException e) {
         throw new MetaException(e.getMessage());
-      } catch (MetaException e) {
-        throw e;
       }
     }
 
-
+    /**
+     * create default roles if they don't exist.
+     *
+     * There is a potential contention when a HiveServer2 using an embedded metastore and a
+     * Metastore Server try to invoke createDefaultRoles concurrently. If one fails, the
+     * JDOException is caught and the call retried once; if it fails again, the failure is
+     * simply logged as a warning, which means the other invocation succeeded.
+     *
+     * @throws MetaException
+     */
     private void createDefaultRoles() throws MetaException {
+      try {
+        createDefaultRoles_core();
+      } catch (JDOException e) {
+        LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+        createDefaultRoles_core();
+      }
+    }
+
+    private void createDefaultRoles_core() throws MetaException {
 
       RawStore ms = getMS();
       try {
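
The createDefaultDB and createDefaultRoles changes above, and addAdminUsers below, share one retry-once shape: catch the JDOException raised when a concurrent metastore instance wins the race, warn, and try again. A stripped-down sketch of that pattern, assuming the JDO API (javax.jdo) on the classpath and a placeholder work body:

    import javax.jdo.JDOException;

    public class RetryOnceSketch {
      interface Work {
        void run();
      }

      // Run once; on JDOException, warn and run once more. A failure on the
      // retry propagates to the caller.
      static void runWithOneRetry(Work work) {
        try {
          work.run();
        } catch (JDOException e) {
          System.err.println("Retrying after error: " + e.getMessage());
          work.run();
        }
      }

      public static void main(String[] args) {
        final int[] attempts = {0};
        runWithOneRetry(new Work() {
          public void run() {
            if (attempts[0]++ == 0) {
              throw new JDOException("simulated unique-key race");
            }
          }
        });
        System.out.println("attempts: " + attempts[0]); // 2
      }
    }
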
@@ -620,7 +652,25 @@ public class HiveMetaStore extends Thrif
       }
     }
 
+    /**
+     * add admin users if they don't exist.
+     *
+     * There is a potential contention when a HiveServer2 using an embedded metastore and a
+     * Metastore Server try to invoke addAdminUsers concurrently. If one fails, the
+     * JDOException is caught and the call retried once; if it fails again, the failure is
+     * simply logged as a warning, which means the other invocation succeeded.
+     *
+     * @throws MetaException
+     */
     private void addAdminUsers() throws MetaException {
+      try {
+        addAdminUsers_core();
+      } catch (JDOException e) {
+        LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+        addAdminUsers_core();
+      }
+    }
+
+    private void addAdminUsers_core() throws MetaException {
 
       // now add pre-configured users to admin role
       String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
@@ -803,7 +853,7 @@ public class HiveMetaStore extends Thrif
       Exception ex = null;
       try {
         try {
-          if (null != get_database(db.getName())) {
+          if (null != get_database_core(db.getName())) {
             throw new AlreadyExistsException("Database " + db.getName() + " already exists");
           }
         } catch (NoSuchObjectException e) {
@@ -829,25 +879,45 @@ public class HiveMetaStore extends Thrif
     }
 
     @Override
-    public Database get_database(final String name) throws NoSuchObjectException,
-        MetaException {
+    public Database get_database(final String name) throws NoSuchObjectException, MetaException {
       startFunction("get_database", ": " + name);
       Database db = null;
       Exception ex = null;
       try {
-        db = getMS().getDatabase(name);
+        db = get_database_core(name);
+        firePreEvent(new PreReadDatabaseEvent(db, this));
       } catch (MetaException e) {
         ex = e;
         throw e;
       } catch (NoSuchObjectException e) {
         ex = e;
         throw e;
+      } finally {
+        endFunction("get_database", db != null, ex);
+      }
+      return db;
+    }
+
+    /**
+     * Equivalent to get_database, but does not write to audit logs or fire pre-event listeners.
+     * Meant to be used for internal hive classes that don't use the thrift interface.
+     * @param name
+     * @return
+     * @throws NoSuchObjectException
+     * @throws MetaException
+     */
+    public Database get_database_core(final String name) throws NoSuchObjectException,
+        MetaException {
+      Database db = null;
+      try {
+        db = getMS().getDatabase(name);
+      } catch (MetaException e) {
+        throw e;
+      } catch (NoSuchObjectException e) {
+        throw e;
       } catch (Exception e) {
-        ex = e;
         assert (e instanceof RuntimeException);
         throw (RuntimeException) e;
-      } finally {
-        endFunction("get_database", db != null, ex);
       }
       return db;
     }
@@ -1373,7 +1443,7 @@ public class HiveMetaStore extends Thrif
       try {
         ms.openTransaction();
         // drop any partitions
-        tbl = get_table(dbname, name);
+        tbl = get_table_core(dbname, name);
         if (tbl == null) {
           throw new NoSuchObjectException(name + " doesn't exist");
         }
@@ -1424,10 +1494,14 @@ public class HiveMetaStore extends Thrif
         if (!success) {
           ms.rollbackTransaction();
         } else if (deleteData && !isExternal) {
+          boolean ifPurge = false;
+          if (envContext != null) {
+            ifPurge = Boolean.parseBoolean(envContext.getProperties().get("ifPurge"));
+          }
           // Delete the data in the partitions which have other locations
-          deletePartitionData(partPaths);
+          deletePartitionData(partPaths, ifPurge);
           // Delete the data in the table
-          deleteTableData(tblPath);
+          deleteTableData(tblPath, ifPurge);
           // ok even if the data is not deleted
         }
         for (MetaStoreEventListener listener : listeners) {
@@ -1444,9 +1518,21 @@ public class HiveMetaStore extends Thrif
      * @param tablePath
      */
     private void deleteTableData(Path tablePath) {
+      deleteTableData(tablePath, false);
+    }
+
+    /**
+     * Deletes the data in a table's location; if the delete fails, logs an error
+     *
+     * @param tablePath
+     * @param ifPurge completely purge the table (skipping trash) while removing
+     *                data from warehouse
+     */
+    private void deleteTableData(Path tablePath, boolean ifPurge) {
+
       if (tablePath != null) {
         try {
-          wh.deleteDir(tablePath, true);
+          wh.deleteDir(tablePath, true, ifPurge);
         } catch (Exception e) {
           LOG.error("Failed to delete table directory: " + tablePath +
               " " + e.getMessage());
@@ -1461,10 +1547,22 @@ public class HiveMetaStore extends Thrif
      * @param partPaths
      */
     private void deletePartitionData(List<Path> partPaths) {
+      deletePartitionData(partPaths, false);
+    }
+
+    /**
+    * Given a list of partitions' locations, tries to delete each one,
+    * logging an error for each that fails.
+    *
+    * @param partPaths
+    * @param ifPurge completely purge the partition (skipping trash) while
+    *                removing data from warehouse
+    */
+    private void deletePartitionData(List<Path> partPaths, boolean ifPurge) {
       if (partPaths != null && !partPaths.isEmpty()) {
         for (Path partPath : partPaths) {
           try {
-            wh.deleteDir(partPath, true);
+            wh.deleteDir(partPath, true, ifPurge);
           } catch (Exception e) {
             LOG.error("Failed to delete partition directory: " + partPath +
                 " " + e.getMessage());
@@ -1597,13 +1695,40 @@ public class HiveMetaStore extends Thrif
       startTableFunction("get_table", dbname, name);
       Exception ex = null;
       try {
+        t = get_table_core(dbname, name);
+        firePreEvent(new PreReadTableEvent(t, this));
+      } catch (MetaException e) {
+        ex = e;
+        throw e;
+      } catch (NoSuchObjectException e) {
+        ex = e;
+        throw e;
+      } finally {
+        endFunction("get_table", t != null, ex, name);
+      }
+      return t;
+    }
+
+    /**
+     * Equivalent of get_table, but does not log audits or fire the pre-event listener.
+     * Meant to be used for calls made by other hive classes that are not using the
+     * thrift interface.
+     * @param dbname
+     * @param name
+     * @return Table object
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     */
+    public Table get_table_core(final String dbname, final String name) throws MetaException,
+        NoSuchObjectException {
+      Table t;
+      try {
         t = getMS().getTable(dbname, name);
         if (t == null) {
           throw new NoSuchObjectException(dbname + "." + name
               + " table not found");
         }
       } catch (Exception e) {
-        ex = e;
         if (e instanceof MetaException) {
           throw (MetaException) e;
         } else if (e instanceof NoSuchObjectException) {
@@ -1611,8 +1736,6 @@ public class HiveMetaStore extends Thrif
         } else {
           throw newMetaException(e);
         }
-      } finally {
-        endFunction("get_table", t != null, ex, name);
       }
       return t;
     }
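
The get_table/get_table_core split above is a wrapper/core pattern: the Thrift-facing method adds auditing and pre-event firing, while internal callers use the core lookup directly. A compact sketch of the split, with simplified placeholder names and a simplified listener type:

    import java.util.ArrayList;
    import java.util.List;

    public class AuditedLookupSketch {
      interface PreReadListener {
        void onRead(String tableName);
      }

      private final List<PreReadListener> preListeners = new ArrayList<PreReadListener>();

      // Thrift-facing entry point: fires pre-read listeners (authorization,
      // read auditing) around the core lookup.
      public String getTable(String name) {
        String t = getTableCore(name);
        for (PreReadListener l : preListeners) {
          l.onRead(name);
        }
        return t;
      }

      // Internal entry point: no events, no audit log; used by in-process
      // callers such as drop-table fetching the table it is about to remove.
      String getTableCore(String name) {
        return "table:" + name; // placeholder for the actual metadata lookup
      }
    }
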
@@ -2390,7 +2513,7 @@ public class HiveMetaStore extends Thrif
       try {
         ms.openTransaction();
         part = ms.getPartition(db_name, tbl_name, part_vals);
-        tbl = get_table(db_name, tbl_name);
+        tbl = get_table_core(db_name, tbl_name);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
 
         if (part == null) {
@@ -2484,7 +2607,7 @@ public class HiveMetaStore extends Thrif
       try {
         // We need Partition-s for firing events and for result; DN needs MPartition-s to drop.
         // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes.
-        tbl = get_table(dbName, tblName);
+        tbl = get_table_core(dbName, tblName);
         int minCount = 0;
         RequestPartsSpec spec = request.getParts();
         List<String> partNames = null;
@@ -2643,6 +2766,7 @@ public class HiveMetaStore extends Thrif
       Partition ret = null;
       Exception ex = null;
       try {
+        fireReadTablePreEvent(db_name, tbl_name);
         ret = getMS().getPartition(db_name, tbl_name, part_vals);
       } catch (Exception e) {
         ex = e;
@@ -2659,6 +2783,28 @@ public class HiveMetaStore extends Thrif
       return ret;
     }
 
+    /**
+     * Fire a pre-event for a read-table operation, if there are any
+     * pre-event listeners registered.
+     *
+     * @param dbName
+     * @param tblName
+     * @throws MetaException
+     * @throws NoSuchObjectException
+     */
+    private void fireReadTablePreEvent(String dbName, String tblName)
+        throws MetaException, NoSuchObjectException {
+      if(preListeners.size() > 0) {
+        // do this only if there is a pre event listener registered (avoid unnecessary
+        // metastore api call)
+        Table t = getMS().getTable(dbName, tblName);
+        if (t == null) {
+          throw new NoSuchObjectException(dbName + "." + tblName
+              + " table not found");
+        }
+        firePreEvent(new PreReadTableEvent(t, this));
+      }
+    }
+
     @Override
     public Partition get_partition_with_auth(final String db_name,
         final String tbl_name, final List<String> part_vals,
@@ -2666,7 +2812,7 @@ public class HiveMetaStore extends Thrif
         throws MetaException, NoSuchObjectException, TException {
       startPartitionFunction("get_partition_with_auth", db_name, tbl_name,
           part_vals);
-
+      fireReadTablePreEvent(db_name, tbl_name);
       Partition ret = null;
       Exception ex = null;
       try {
@@ -2688,7 +2834,7 @@ public class HiveMetaStore extends Thrif
     public List<Partition> get_partitions(final String db_name, final String tbl_name,
         final short max_parts) throws NoSuchObjectException, MetaException {
       startTableFunction("get_partitions", db_name, tbl_name);
-
+      fireReadTablePreEvent(db_name, tbl_name);
       List<Partition> ret = null;
       Exception ex = null;
       try {
@@ -2745,7 +2891,7 @@ public class HiveMetaStore extends Thrif
 
       List<PartitionSpec> partitionSpecs = null;
       try {
-        Table table = get_table(dbName, tableName);
+        Table table = get_table_core(dbName, tableName);
         List<Partition> partitions = get_partitions(dbName, tableName, (short) max_parts);
 
         if (is_partition_spec_grouping_enabled(table)) {
@@ -2769,7 +2915,7 @@ public class HiveMetaStore extends Thrif
 
     private static class StorageDescriptorKey {
 
-      private StorageDescriptor sd;
+      private final StorageDescriptor sd;
 
       StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; }
 
@@ -2891,9 +3037,9 @@ public class HiveMetaStore extends Thrif
 
     @Override
     public List<String> get_partition_names(final String db_name, final String tbl_name,
-        final short max_parts) throws MetaException {
+        final short max_parts) throws MetaException, NoSuchObjectException {
       startTableFunction("get_partition_names", db_name, tbl_name);
-
+      fireReadTablePreEvent(db_name, tbl_name);
       List<String> ret = null;
       Exception ex = null;
       try {
@@ -3010,14 +3156,7 @@ public class HiveMetaStore extends Thrif
       Exception ex = null;
       try {
         for (Partition tmpPart : new_parts) {
-          try {
-            for (MetaStorePreEventListener listener : preListeners) {
-              listener.onEvent(
-                  new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
-            }
-          } catch (NoSuchObjectException e) {
-            throw new MetaException(e.getMessage());
-          }
+          firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
         }
         oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts);
 
@@ -3122,7 +3261,7 @@ public class HiveMetaStore extends Thrif
       boolean success = false;
       Exception ex = null;
       try {
-        Table oldt = get_table(dbname, name);
+        Table oldt = get_table_core(dbname, name);
         firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
         alterHandler.alterTable(getMS(), wh, dbname, name, newTable);
         success = true;
@@ -3206,7 +3345,7 @@ public class HiveMetaStore extends Thrif
       Exception ex = null;
       try {
         try {
-          tbl = get_table(db, base_table_name);
+          tbl = get_table_core(db, base_table_name);
         } catch (NoSuchObjectException e) {
           throw new UnknownTableException(e.getMessage());
         }
@@ -3266,7 +3405,7 @@ public class HiveMetaStore extends Thrif
 
         Table tbl;
         try {
-          tbl = get_table(db, base_table_name);
+          tbl = get_table_core(db, base_table_name);
         } catch (NoSuchObjectException e) {
           throw new UnknownTableException(e.getMessage());
         }
@@ -3385,6 +3524,7 @@ public class HiveMetaStore extends Thrif
     private Partition get_partition_by_name_core(final RawStore ms, final String db_name,
         final String tbl_name, final String part_name)
         throws MetaException, NoSuchObjectException, TException {
+      fireReadTablePreEvent(db_name, tbl_name);
       List<String> partVals = null;
       try {
         partVals = getPartValsFromName(ms, db_name, tbl_name, part_name);
@@ -3406,7 +3546,6 @@ public class HiveMetaStore extends Thrif
 
       startFunction("get_partition_by_name", ": db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
-
       Partition ret = null;
       Exception ex = null;
       try {
@@ -3536,6 +3675,7 @@ public class HiveMetaStore extends Thrif
         final List<String> groupNames) throws MetaException, TException, NoSuchObjectException {
       startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name,
           part_vals);
+      fireReadTablePreEvent(db_name, tbl_name);
       List<Partition> ret = null;
       Exception ex = null;
       try {
@@ -3558,6 +3698,7 @@ public class HiveMetaStore extends Thrif
         final String tbl_name, final List<String> part_vals, final short max_parts)
         throws MetaException, TException, NoSuchObjectException {
       startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals);
+      fireReadTablePreEvent(db_name, tbl_name);
       List<String> ret = null;
       Exception ex = null;
       try {
@@ -3726,7 +3867,7 @@ public class HiveMetaStore extends Thrif
         String idxTblName = index.getIndexTableName();
         if (idxTblName != null) {
           String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName);
-          Table tbl = get_table(qualified[0], qualified[1]);
+          Table tbl = get_table_core(qualified[0], qualified[1]);
           if (tbl.getSd() == null) {
             throw new MetaException("Table metadata is corrupted");
           }
@@ -4028,7 +4169,7 @@ public class HiveMetaStore extends Thrif
       } finally {
         endFunction("write_partition_column_statistics: ", ret != false, null, tableName);
       }
-    } 
+    }
 
     @Override
     public boolean delete_partition_column_statistics(String dbName, String tableName,
@@ -4083,7 +4224,7 @@ public class HiveMetaStore extends Thrif
         final String tblName, final String filter, final short maxParts)
         throws MetaException, NoSuchObjectException, TException {
       startTableFunction("get_partitions_by_filter", dbName, tblName);
-
+      fireReadTablePreEvent(dbName, tblName);
       List<Partition> ret = null;
       Exception ex = null;
       try {
@@ -4106,7 +4247,7 @@ public class HiveMetaStore extends Thrif
 
       List<PartitionSpec> partitionSpecs = null;
       try {
-        Table table = get_table(dbName, tblName);
+        Table table = get_table_core(dbName, tblName);
         List<Partition> partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts);
 
         if (is_partition_spec_grouping_enabled(table)) {
@@ -4133,6 +4274,7 @@ public class HiveMetaStore extends Thrif
         PartitionsByExprRequest req) throws TException {
       String dbName = req.getDbName(), tblName = req.getTblName();
       startTableFunction("get_partitions_by_expr", dbName, tblName);
+      fireReadTablePreEvent(dbName, tblName);
       PartitionsByExprResult ret = null;
       Exception ex = null;
       try {
@@ -4169,7 +4311,7 @@ public class HiveMetaStore extends Thrif
         throws MetaException, NoSuchObjectException, TException {
 
       startTableFunction("get_partitions_by_names", dbName, tblName);
-
+      fireReadTablePreEvent(dbName, tblName);
       List<Partition> ret = null;
       Exception ex = null;
       try {
@@ -4214,7 +4356,7 @@ public class HiveMetaStore extends Thrif
       List<String> partValue = hiveObject.getPartValues();
       if (partValue != null && partValue.size() > 0) {
         try {
-          Table table = get_table(hiveObject.getDbName(), hiveObject
+          Table table = get_table_core(hiveObject.getDbName(), hiveObject
               .getObjectName());
           partName = Warehouse
               .makePartName(table.getPartitionKeys(), partValue);
@@ -4658,7 +4800,7 @@ public class HiveMetaStore extends Thrif
         if (dbName == null) {
           return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType);
         }
-        Table tbl = get_table(dbName, tableName);
+        Table tbl = get_table_core(dbName, tableName);
         String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
         if (principalName == null) {
           return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName);
@@ -4736,7 +4878,7 @@ public class HiveMetaStore extends Thrif
         if (dbName == null) {
           return getMS().listPrincipalPartitionGrantsAll(principalName, principalType);
         }
-        Table tbl = get_table(dbName, tableName);
+        Table tbl = get_table_core(dbName, tableName);
         String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues);
         if (principalName == null) {
           return getMS().listPartitionGrantsAll(dbName, tableName, partName);
@@ -5394,14 +5536,21 @@ public class HiveMetaStore extends Thrif
     }
   }
 
-  
-  public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException {
-    return newHMSHandler(name, hiveConf, false);
+
+  public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf)
+      throws MetaException {
+    return newRetryingHMSHandler(baseHandler, hiveConf, false);
   }
 
-  public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf, boolean local)
+  public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf,
+      boolean local) throws MetaException {
+    return RetryingHMSHandler.getProxy(hiveConf, baseHandler, local);
+  }
+
+  public static Iface newRetryingHMSHandler(String name, HiveConf conf, boolean local)
       throws MetaException {
-    return RetryingHMSHandler.getProxy(hiveConf, name, local);
+    HMSHandler baseHandler = new HiveMetaStore.HMSHandler(name, conf, false);
+    return RetryingHMSHandler.getProxy(conf, baseHandler, local);
   }
 
   /**
@@ -5610,6 +5759,9 @@ public class HiveMetaStore extends Thrif
 
       TProcessor processor;
       TTransportFactory transFactory;
+      HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf,
+          false);
+      IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf);
       if (useSasl) {
         // we are in secure mode.
         if (useFramedTransport) {
@@ -5619,17 +5771,14 @@ public class HiveMetaStore extends Thrif
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE),
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
         // start delegation token manager
-        HMSHandler hmsHandler = new HMSHandler("new db based metaserver", conf);
-        saslServer.startDelegationTokenSecretManager(conf, hmsHandler);
+        saslServer.startDelegationTokenSecretManager(conf, baseHandler.getMS());
         transFactory = saslServer.createTransportFactory(
                 MetaStoreUtils.getMetaStoreSaslProperties(conf));
         processor = saslServer.wrapProcessor(
-          new ThriftHiveMetastore.Processor<HMSHandler>(hmsHandler));
+          new ThriftHiveMetastore.Processor<IHMSHandler>(handler));
         LOG.info("Starting DB backed MetaStore Server in Secure Mode");
       } else {
         // we are in unsecure mode.
-        IHMSHandler handler = newHMSHandler("new db based metaserver", conf);
-
         if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) {
           transFactory = useFramedTransport ?
               new ChainedTTransportFactory(new TFramedTransport.Factory(),
@@ -5784,7 +5933,7 @@ public class HiveMetaStore extends Thrif
     LOG.info("Starting metastore thread of type " + thread.getClass().getName());
     thread.setHiveConf(conf);
     thread.setThreadId(nextThreadId++);
-    thread.init(new MetaStoreThread.BooleanPointer());
+    thread.init(new MetaStoreThread.BooleanPointer(), new MetaStoreThread.BooleanPointer());
     thread.start();
   }
 }

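For illustration, a minimal sketch of wiring the new factory methods together
for an embedded metastore (the handler name string is illustrative):

  HiveConf conf = new HiveConf();
  HMSHandler baseHandler = new HiveMetaStore.HMSHandler("embedded metastore", conf, false);
  IHMSHandler handler = HiveMetaStore.newRetryingHMSHandler(baseHandler, conf);
  // every call through 'handler' is now retried on JDO-level failures
  // before reaching the underlying HMSHandler
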
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Tue Oct 14 19:06:45 2014
@@ -28,7 +28,6 @@ import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.InetAddress;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -98,7 +97,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;
 import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;
 import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;
@@ -122,7 +120,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 import org.apache.hadoop.hive.metastore.api.UnlockRequest;
-import org.apache.hadoop.hive.metastore.partition.spec.CompositePartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.hive.shims.HadoopShims;
@@ -178,7 +175,7 @@ public class HiveMetaStoreClient impleme
     if (localMetaStore) {
       // instantiate the metastore server handler directly instead of connecting
       // through the network
-      client = HiveMetaStore.newHMSHandler("hive client", conf, true);
+      client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true);
       isConnected = true;
       snapshotActiveConf();
       return;
@@ -763,18 +760,35 @@ public class HiveMetaStoreClient impleme
   }
 
   /**
-   * @param name
-   * @param dbname
-   * @throws NoSuchObjectException
-   * @throws MetaException
-   * @throws TException
-   * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
-   *      java.lang.String, boolean)
+   * {@inheritDoc}
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
    */
   @Override
-  public void dropTable(String dbname, String name)
-      throws NoSuchObjectException, MetaException, TException {
-    dropTable(dbname, name, true, true, null);
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab) throws MetaException, TException,
+      NoSuchObjectException, UnsupportedOperationException {
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+  }
+
+  /**
+   * Drop the table and choose whether to save the data in the trash.
+   * @param ifPurge completely purge the table (skipping trash) while removing
+   *                data from the warehouse
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   */
+  @Override
+  public void dropTable(String dbname, String name, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge)
+      throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+    // build a new EnvironmentContext carrying the ifPurge flag
+    EnvironmentContext envContext = null;
+    if (ifPurge) {
+      Map<String, String> warehouseOptions = new HashMap<String, String>();
+      warehouseOptions.put("ifPurge", "TRUE");
+      envContext = new EnvironmentContext(warehouseOptions);
+    }
+    dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
   }
 
   /** {@inheritDoc} */
@@ -786,23 +800,37 @@ public class HiveMetaStoreClient impleme
   }
 
   /**
+   * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+   */
+  @Override
+  public void dropTable(String dbname, String name)
+      throws NoSuchObjectException, MetaException, TException {
+    dropTable(dbname, name, true, true, null);
+  }
+
+  /**
+   * Drop the table and choose whether to: delete the underlying table data;
+   * throw if the table doesn't exist; save the data in the trash.
+   *
    * @param dbname
    * @param name
    * @param deleteData
    *          delete the underlying data or just delete the table in metadata
-   * @throws NoSuchObjectException
+   * @param ignoreUnknownTab
+   *          don't throw if the requested table doesn't exist
+   * @param envContext
+   *          extra environment options to pass through the thrift call (e.g. the ifPurge flag)
    * @throws MetaException
+   *           could not drop table properly
+   * @throws NoSuchObjectException
+   *           the table wasn't found
    * @throws TException
+   *           a thrift communication error occurred
+   * @throws UnsupportedOperationException
+   *           dropping an index table is not allowed
    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
    *      java.lang.String, boolean)
    */
-  @Override
-  public void dropTable(String dbname, String name, boolean deleteData,
-      boolean ignoreUnknownTab) throws MetaException, TException,
-      NoSuchObjectException, UnsupportedOperationException {
-    dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
-  }
-
   public void dropTable(String dbname, String name, boolean deleteData,
       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
       NoSuchObjectException, UnsupportedOperationException {
@@ -1283,6 +1311,7 @@ public class HiveMetaStoreClient impleme
   }
 
   /** {@inheritDoc} */
+  @Override
   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
     InvalidInputException{
@@ -1659,7 +1688,12 @@ public class HiveMetaStoreClient impleme
 
   @Override
   public ValidTxnList getValidTxns() throws TException {
-    return TxnHandler.createValidTxnList(client.get_open_txns());
+    return TxnHandler.createValidTxnList(client.get_open_txns(), 0);
+  }
+
+  @Override
+  public ValidTxnList getValidTxns(long currentTxn) throws TException {
+    return TxnHandler.createValidTxnList(client.get_open_txns(), currentTxn);
   }
 
   @Override

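A minimal sketch of the new ifPurge code path from the client side (database
and table names are illustrative):

  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

  public class DropTablePurgeExample {
    public static void main(String[] args) throws Exception {
      HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
      // deleteData=true removes the warehouse files, ignoreUnknownTab=true
      // suppresses NoSuchObjectException, ifPurge=true skips the trash
      client.dropTable("default", "scratch_tbl", true, true, true);
      client.close();
    }
  }
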
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java Tue Oct 14 19:06:45 2014
@@ -37,12 +37,14 @@ public class HiveMetaStoreFsImpl impleme
 
   @Override
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      Configuration conf) throws MetaException {
+      boolean ifPurge, Configuration conf) throws MetaException {
     LOG.info("deleting  " + f);
     HadoopShims hadoopShim = ShimLoader.getHadoopShims();
 
     try {
-      if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
+      if (ifPurge) {
+        LOG.info("Not moving "+ f +" to trash");
+      } else if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
         LOG.info("Moved to trash: " + f);
         return true;
       }

Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Tue Oct 14 19:06:45 2014
@@ -192,6 +192,10 @@ public interface IMetaStoreClient {
    *          The database for this table
    * @param tableName
    *          The table to drop
+   * @param deleteData
+   *          Should we delete the underlying data
+   * @param ignoreUnknownTab
+   *          don't throw if the requested table doesn't exist
    * @throws MetaException
    *           Could not drop table properly.
    * @throws NoSuchObjectException
@@ -200,7 +204,16 @@ public interface IMetaStoreClient {
    *           A thrift communication error occurred
    */
   void dropTable(String dbname, String tableName, boolean deleteData,
-      boolean ignoreUknownTab) throws MetaException, TException,
+      boolean ignoreUnknownTab) throws MetaException, TException,
+      NoSuchObjectException;
+
+  /**
+   * @param ifPurge
+   *          completely purge the table (skipping trash) while removing data from the warehouse
+   * @see #dropTable(String, String, boolean, boolean)
+   */
+  void dropTable(String dbname, String tableName, boolean deleteData,
+      boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
       NoSuchObjectException;
 
   /**
@@ -226,6 +239,9 @@ public interface IMetaStoreClient {
   void dropTable(String tableName, boolean deleteData)
       throws MetaException, UnknownTableException, TException, NoSuchObjectException;
 
+  /**
+   * @see #dropTable(String, String, boolean, boolean)
+   */
   void dropTable(String dbname, String tableName)
       throws MetaException, TException, NoSuchObjectException;
 
@@ -1070,6 +1086,15 @@ public interface IMetaStoreClient {
   ValidTxnList getValidTxns() throws TException;
 
   /**
+   * Get a structure that details valid transactions.
+   * @param currentTxn The current transaction of the caller.  This will be removed from the
+   *                   exceptions list so that the caller sees records from its own transaction.
+   * @return list of valid transactions
+   * @throws TException
+   */
+  ValidTxnList getValidTxns(long currentTxn) throws TException;
+
+  /**
    * Initiate a transaction.
    * @param user User who is opening this transaction.  This is the Hive user,
    *             not necessarily the OS user.  It is assumed that this user has already been

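A hedged sketch of the new getValidTxns(long) overload in use, assuming
ValidTxnList exposes an isTxnValid check (user name and ids are illustrative):

  long myTxn = client.openTxn("hive_user");
  ValidTxnList valid = client.getValidTxns(myTxn);
  // myTxn was removed from the exceptions list, so the caller's own writes
  // stay readable while other still-open transactions remain invisible
  long otherTxn = 42L;
  boolean readable = valid.isTxnValid(otherTxn);
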
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java Tue Oct 14 19:06:45 2014
@@ -32,11 +32,12 @@ public interface MetaStoreFS {
    * delete a directory
    *
    * @param f
   * @param recursive
+   * @param ifPurge
    * @return true on success
    * @throws MetaException
    */
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
-      Configuration conf) throws MetaException;
+      boolean ifPurge, Configuration conf) throws MetaException;
 
 }

Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java Tue Oct 14 19:06:45 2014
@@ -41,25 +41,30 @@ public class MetaStoreInit {
   }
 
   /**
-   * Updates the connection URL in hiveConf using the hook
-   *
+   * Updates the connection URL in hiveConf using the hook (if a hook has been
+   * set via the hive.metastore.ds.connection.url.hook property)
+   * @param originalConf - the original configuration, used to look up hook settings
+   * @param activeConf - the configuration in use, used to look up the db url
+   * @param badUrl - the connection URL that previously failed, if any
+   * @param updateData - hook information
    * @return true if a new connection URL was loaded into the thread local
    *         configuration
+   * @throws MetaException
    */
-  static boolean updateConnectionURL(HiveConf hiveConf, Configuration conf,
+  static boolean updateConnectionURL(HiveConf originalConf, Configuration activeConf,
     String badUrl, MetaStoreInitData updateData)
       throws MetaException {
     String connectUrl = null;
-    String currentUrl = MetaStoreInit.getConnectionURL(conf);
+    String currentUrl = MetaStoreInit.getConnectionURL(activeConf);
     try {
       // We always call init because the hook name in the configuration could
       // have changed.
-      MetaStoreInit.initConnectionUrlHook(hiveConf, updateData);
+      MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
       if (updateData.urlHook != null) {
         if (badUrl != null) {
           updateData.urlHook.notifyBadConnectionUrl(badUrl);
         }
-        connectUrl = updateData.urlHook.getJdoConnectionUrl(hiveConf);
+        connectUrl = updateData.urlHook.getJdoConnectionUrl(originalConf);
       }
     } catch (Exception e) {
       LOG.error("Exception while getting connection URL from the hook: " +
@@ -71,7 +76,7 @@ public class MetaStoreInit {
           String.format("Overriding %s with %s",
               HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
               connectUrl));
-      conf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
+      activeConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.toString(),
           connectUrl);
       return true;
     }

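For reference, a hedged sketch of the kind of hook updateConnectionURL
consults, registered via the hive.metastore.ds.connection.url.hook property
(the standby URL is illustrative, and the interface is assumed to be
org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;

  public class FailoverUrlHook implements JDOConnectionURLHook {
    @Override
    public String getJdoConnectionUrl(Configuration conf) throws Exception {
      // return the URL the metastore should try next
      return "jdbc:mysql://standby-db:3306/metastore";
    }
    @Override
    public void notifyBadConnectionUrl(String url) {
      // remember the failed URL so the next lookup can avoid it
    }
  }
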
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java Tue Oct 14 19:06:45 2014
@@ -43,8 +43,13 @@ public interface MetaStoreThread {
    * have been called.
    * @param stop a flag to watch for when to stop.  If this value is set to true,
    *             the thread will terminate the next time through its main loop.
+   * @param looped a flag that is set to true every time the thread goes through its main loop.
+   *               This is purely for testing, so that tests can assure themselves that the
+   *               thread has run through its loop at least once.  The test can set this value
+   *               to false; the thread will then set it back to true only after it has gone
+   *               completely through the loop at least once more.
    */
-  void init(BooleanPointer stop) throws MetaException;
+  void init(BooleanPointer stop, BooleanPointer looped) throws MetaException;
 
   /**
    * Run the thread in the background.  This must not be called until

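A hedged sketch of how a test might drive the new looped flag, assuming
BooleanPointer exposes a public boolVal field and 'thread' is some
MetaStoreThread implementation under test:

  MetaStoreThread.BooleanPointer stop = new MetaStoreThread.BooleanPointer();
  MetaStoreThread.BooleanPointer looped = new MetaStoreThread.BooleanPointer();
  thread.init(stop, looped);
  thread.start();
  looped.boolVal = false;          // reset, then wait for one complete pass
  while (!looped.boolVal) {
    Thread.sleep(100);
  }
  stop.boolVal = true;             // ask the thread to exit on its next loop
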
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Tue Oct 14 19:06:45 2014
@@ -258,7 +258,7 @@ public class MetaStoreUtils {
         if (oldPart.getParameters().containsKey(stat)) {
           Long oldStat = Long.parseLong(oldPart.getParameters().get(stat));
           Long newStat = Long.parseLong(newPart.getParameters().get(stat));
-          if (oldStat != newStat) {
+          if (!oldStat.equals(newStat)) {
             return true;
           }
         }
@@ -993,7 +993,7 @@ public class MetaStoreUtils {
       partString = partString.concat(partStringSep);
       partString = partString.concat(partKey.getName());
       partTypesString = partTypesString.concat(partTypesStringSep);
-      partTypesString = partTypesString.concat(partKey.getType());      
+      partTypesString = partTypesString.concat(partKey.getType());
       if (partStringSep.length() == 0) {
         partStringSep = "/";
         partTypesStringSep = ":";
@@ -1007,7 +1007,7 @@ public class MetaStoreUtils {
       schema
       .setProperty(
           org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMN_TYPES,
-          partTypesString);      
+          partTypesString);
     }
 
     if (parameters != null) {

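The oldStat != newStat fix above guards against a Long autoboxing pitfall:
== compares object identity, and the JVM only caches boxed values in the
-128..127 range. A minimal demonstration:

  Long oldStat = Long.parseLong("4096");
  Long newStat = Long.parseLong("4096");
  System.out.println(oldStat != newStat);        // true: two distinct boxed objects
  System.out.println(!oldStat.equals(newStat));  // false: the values are equal
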
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/RetryingHMSHandler.java Tue Oct 14 19:06:45 2014
@@ -41,34 +41,34 @@ public class RetryingHMSHandler implemen
 
   private static final Log LOG = LogFactory.getLog(RetryingHMSHandler.class);
 
-  private final IHMSHandler base;
+  private final IHMSHandler baseHandler;
   private final MetaStoreInit.MetaStoreInitData metaStoreInitData =
     new MetaStoreInit.MetaStoreInitData();
 
-  private final HiveConf hiveConf;            // base configuration
-  private final Configuration configuration;  // active configuration
+  private final HiveConf origConf;            // base configuration
+  private final Configuration activeConf;  // active configuration
 
-  private RetryingHMSHandler(HiveConf hiveConf, String name, boolean local) throws MetaException {
-    this.hiveConf = hiveConf;
-    this.base = new HiveMetaStore.HMSHandler(name, hiveConf, false);
+  private RetryingHMSHandler(HiveConf hiveConf, IHMSHandler baseHandler, boolean local) throws MetaException {
+    this.origConf = hiveConf;
+    this.baseHandler = baseHandler;
     if (local) {
-      base.setConf(hiveConf); // tests expect configuration changes applied directly to metastore
+      baseHandler.setConf(hiveConf); // tests expect configuration changes applied directly to metastore
     }
-    configuration = base.getConf();
+    activeConf = baseHandler.getConf();
 
     // This has to be called before initializing the instance of HMSHandler
     // Using the hook on startup ensures that the hook always has priority
     // over settings in *.xml.  The thread local conf needs to be used because at this point
     // it has already been initialized using hiveConf.
-    MetaStoreInit.updateConnectionURL(hiveConf, getConf(), null, metaStoreInitData);
+    MetaStoreInit.updateConnectionURL(hiveConf, getActiveConf(), null, metaStoreInitData);
 
-    base.init();
+    baseHandler.init();
   }
 
-  public static IHMSHandler getProxy(HiveConf hiveConf, String name, boolean local)
+  public static IHMSHandler getProxy(HiveConf hiveConf, IHMSHandler baseHandler, boolean local)
       throws MetaException {
 
-    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, name, local);
+    RetryingHMSHandler handler = new RetryingHMSHandler(hiveConf, baseHandler, local);
 
     return (IHMSHandler) Proxy.newProxyInstance(
       RetryingHMSHandler.class.getClassLoader(),
@@ -79,15 +79,15 @@ public class RetryingHMSHandler implemen
   public Object invoke(final Object proxy, final Method method, final Object[] args) throws Throwable {
 
     boolean gotNewConnectUrl = false;
-    boolean reloadConf = HiveConf.getBoolVar(hiveConf,
+    boolean reloadConf = HiveConf.getBoolVar(origConf,
         HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF);
-    long retryInterval = HiveConf.getTimeVar(hiveConf,
+    long retryInterval = HiveConf.getTimeVar(origConf,
         HiveConf.ConfVars.HMSHANDLERINTERVAL, TimeUnit.MILLISECONDS);
-    int retryLimit = HiveConf.getIntVar(hiveConf,
+    int retryLimit = HiveConf.getIntVar(origConf,
         HiveConf.ConfVars.HMSHANDLERATTEMPTS);
 
     if (reloadConf) {
-      MetaStoreInit.updateConnectionURL(hiveConf, getConf(),
+      MetaStoreInit.updateConnectionURL(origConf, getActiveConf(),
         null, metaStoreInitData);
     }
 
@@ -96,9 +96,9 @@ public class RetryingHMSHandler implemen
     while (true) {
       try {
         if (reloadConf || gotNewConnectUrl) {
-          base.setConf(getConf());
+          baseHandler.setConf(getActiveConf());
         }
-        return method.invoke(base, args);
+        return method.invoke(baseHandler, args);
 
       } catch (javax.jdo.JDOException e) {
         caughtException = e;
@@ -158,13 +158,13 @@ public class RetryingHMSHandler implemen
       Thread.sleep(retryInterval);
       // If we have a connection error, the JDO connection URL hook might
       // provide us with a new URL to access the datastore.
-      String lastUrl = MetaStoreInit.getConnectionURL(getConf());
-      gotNewConnectUrl = MetaStoreInit.updateConnectionURL(hiveConf, getConf(),
+      String lastUrl = MetaStoreInit.getConnectionURL(getActiveConf());
+      gotNewConnectUrl = MetaStoreInit.updateConnectionURL(origConf, getActiveConf(),
         lastUrl, metaStoreInitData);
     }
   }
 
-  public Configuration getConf() {
-    return configuration;
+  public Configuration getActiveConf() {
+    return activeConf;
   }
 }

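RetryingHMSHandler follows the standard java.lang.reflect.Proxy retry idiom.
A simplified, generic sketch of the same pattern (not Hive code; names are
illustrative):

  import java.lang.reflect.InvocationHandler;
  import java.lang.reflect.InvocationTargetException;
  import java.lang.reflect.Method;
  import java.lang.reflect.Proxy;

  public final class Retrying {
    public static <T> T proxy(final Class<T> iface, final T target,
        final int attempts, final long sleepMs) {
      return iface.cast(Proxy.newProxyInstance(
          iface.getClassLoader(), new Class<?>[] { iface },
          new InvocationHandler() {
            @Override
            public Object invoke(Object proxy, Method method, Object[] args)
                throws Throwable {
              for (int attempt = 1; ; attempt++) {
                try {
                  return method.invoke(target, args);
                } catch (InvocationTargetException e) {
                  if (attempt >= attempts) {
                    throw e.getCause();   // rethrow the underlying failure
                  }
                  Thread.sleep(sleepMs);  // back off before retrying
                }
              }
            }
          }));
    }
  }
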
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java Tue Oct 14 19:06:45 2014
@@ -57,6 +57,6 @@ public class TSetIpAddressProcessor<I ex
   }
 
   protected void setIpAddress(final Socket inSocket) {
-    HMSHandler.setIpAddress(inSocket.getInetAddress().toString());
+    HMSHandler.setIpAddress(inSocket.getInetAddress().getHostAddress());
   }
 }

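The getHostAddress() change above matters because InetAddress.toString()
yields a "hostname/literal-IP" pair rather than a bare address:

  InetAddress addr = InetAddress.getByName("localhost");
  System.out.println(addr.toString());       // e.g. "localhost/127.0.0.1"
  System.out.println(addr.getHostAddress()); // "127.0.0.1"
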
Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java Tue Oct 14 19:06:45 2014
@@ -224,8 +224,12 @@ public class Warehouse {
   }
 
   public boolean deleteDir(Path f, boolean recursive) throws MetaException {
+    return deleteDir(f, recursive, false);
+  }
+
+  public boolean deleteDir(Path f, boolean recursive, boolean ifPurge) throws MetaException {
     FileSystem fs = getFs(f);
-    return fsHandler.deleteDir(fs, f, recursive, conf);
+    return fsHandler.deleteDir(fs, f, recursive, ifPurge, conf);
   }
 
   public boolean isEmpty(Path path) throws IOException, MetaException {

Modified: hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java?rev=1631841&r1=1631840&r2=1631841&view=diff
==============================================================================
--- hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java (original)
+++ hive/branches/llap/metastore/src/java/org/apache/hadoop/hive/metastore/events/PreEventContext.java Tue Oct 14 19:06:45 2014
@@ -38,6 +38,8 @@ public abstract class PreEventContext {
     DROP_DATABASE,
     LOAD_PARTITION_DONE,
     AUTHORIZATION_API_CALL,
+    READ_TABLE,
+    READ_DATABASE
   }
 
   private final PreEventType eventType;
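A hedged sketch of a pre-event listener reacting to the new READ_TABLE type
(constructor and onEvent signatures assumed from MetaStorePreEventListener;
the check body is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.MetaStorePreEventListener;
  import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
  import org.apache.hadoop.hive.metastore.events.PreEventContext;

  public class ReadAuditListener extends MetaStorePreEventListener {
    public ReadAuditListener(Configuration config) {
      super(config);
    }
    @Override
    public void onEvent(PreEventContext context)
        throws MetaException, NoSuchObjectException, InvalidOperationException {
      if (context.getEventType() == PreEventContext.PreEventType.READ_TABLE) {
        // e.g. verify the caller may read this table, or record the access
      }
    }
  }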