Posted to commits@hive.apache.org by ga...@apache.org on 2017/11/03 22:49:46 UTC

[2/7] hive git commit: HIVE-17967 Move HiveMetaStore class

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
index d3eee85..c880a9a 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java
@@ -60,7 +60,7 @@ public class MetaStoreInit {
     try {
       // We always call init because the hook name in the configuration could
       // have changed.
-      MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
+      initConnectionUrlHook(originalConf, updateData);
       if (updateData.urlHook != null) {
         if (badUrl != null) {
           updateData.urlHook.notifyBadConnectionUrl(badUrl);

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
index 2671c1f..2fcc162 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartFilterExprUtil.java
@@ -80,10 +80,18 @@ public class PartFilterExprUtil {
     try {
       @SuppressWarnings("unchecked")
       Class<? extends PartitionExpressionProxy> clazz =
-           JavaUtils.getClass(className, PartitionExpressionProxy.class);
+          JavaUtils.getClass(className, PartitionExpressionProxy.class);
       return JavaUtils.newInstance(
           clazz, new Class<?>[0], new Object[0]);
     } catch (MetaException e) {
+      if (e.getMessage().matches(".* class not found")) {
+        // TODO MS-SPLIT For now if we cannot load the default PartitionExpressionForMetastore
+        // class (since it's from ql) load the DefaultPartitionExpressionProxy, which just throws
+        // UnsupportedOperationExceptions.  This allows existing Hive instances to work but also
+        // allows us to instantiate the metastore stand alone for testing.  Not sure if this is
+        // the best long term solution.
+        return new DefaultPartitionExpressionProxy();
+      }
       LOG.error("Error loading PartitionExpressionProxy", e);
       throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
     }
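
The catch block above falls back to DefaultPartitionExpressionProxy when the ql-provided class cannot be loaded. A minimal, generic sketch of that load-or-fall-back pattern; the ProxyLoader class and loadOrDefault helper below are hypothetical and not part of the Hive API:

public class ProxyLoader {
  // Try to instantiate className as an implementation of iface; if the class is
  // not on the classpath, return the supplied fallback instead of failing startup.
  static <T> T loadOrDefault(String className, Class<T> iface, T fallback) {
    try {
      Class<? extends T> clazz = Class.forName(className).asSubclass(iface);
      return clazz.getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      return fallback;
    }
  }
}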

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
new file mode 100644
index 0000000..6251e23
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/StorageSchemaReader.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+import java.util.List;
+
+/**
+ * An interface to implement reading schemas from stored data.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+interface StorageSchemaReader {
+  /**
+   * Read the schema from the storage representation of the table.
+   * @param tbl metastore table object
+   * @param envContext environment context
+   * @param conf current configuration object

+   * @return list of field schemas
+   * @throws MetaException if the table storage could not be read
+   */
+ List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext, Configuration conf)
+     throws MetaException;
+}
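
For illustration, a hypothetical implementation of the new interface that simply echoes back the columns already recorded in the table's storage descriptor. The class name is made up and it is not the DefaultStorageSchemaReader referenced elsewhere in this patch; it sits in the metastore package because the interface is package-private:

package org.apache.hadoop.hive.metastore;

import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

public class PassThroughStorageSchemaReader implements StorageSchemaReader {
  @Override
  public List<FieldSchema> readSchema(Table tbl, EnvironmentContext envContext,
                                      Configuration conf) throws MetaException {
    // Ignore the stored data entirely and return the declared columns.
    return tbl.getSd().getCols();
  }
}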

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
new file mode 100644
index 0000000..c0c9604
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TSetIpAddressProcessor.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.lang.reflect.InvocationTargetException;
+import java.net.Socket;
+
+import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+
+/**
+ * TSetIpAddressProcessor passes the IP address of the Thrift client to the HMSHandler.
+ */
+public class TSetIpAddressProcessor<I extends Iface> extends ThriftHiveMetastore.Processor<Iface> {
+
+  @SuppressWarnings("unchecked")
+  public TSetIpAddressProcessor(I iface) throws SecurityException, NoSuchFieldException,
+    IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
+    InvocationTargetException {
+    super(iface);
+  }
+
+  @Override
+  public boolean process(final TProtocol in, final TProtocol out) throws TException {
+    setIpAddress(in);
+
+    return super.process(in, out);
+  }
+
+  protected void setIpAddress(final TProtocol in) {
+    TTransport transport = in.getTransport();
+    if (!(transport instanceof TSocket)) {
+      return;
+    }
+    setIpAddress(((TSocket)transport).getSocket());
+  }
+
+  protected void setIpAddress(final Socket inSocket) {
+    HMSHandler.setThreadLocalIpAddress(inSocket.getInetAddress().getHostAddress());
+  }
+}
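
The processor hands the client address to HMSHandler through a thread-local, so handler methods can read it without the address being passed through every call. A bare-bones sketch of that pattern; the CallerContext class is illustrative only:

public class CallerContext {
  // One slot per server worker thread; the processor writes it before dispatching
  // the call, and any handler method on the same thread can read it back.
  private static final ThreadLocal<String> IP_ADDRESS = new ThreadLocal<>();

  static void setIpAddress(String ip) {
    IP_ADDRESS.set(ip);
  }

  static String getIpAddress() {
    return IP_ADDRESS.get();
  }
}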

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
new file mode 100644
index 0000000..5285b54
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.net.Socket;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args;
+import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.ProcessFunction;
+import org.apache.thrift.TApplicationException;
+import org.apache.thrift.TBase;
+import org.apache.thrift.TException;
+import org.apache.thrift.protocol.TMessage;
+import org.apache.thrift.protocol.TMessageType;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.protocol.TProtocolUtil;
+import org.apache.thrift.protocol.TType;
+
+/** TUGIBasedProcessor is used in unsecure mode for Thrift metastore client-server communication.
+ *  This processor checks whether the first RPC call after the connection is set up is set_ugi(),
+ *  through which the client sends its UGI to the server. The processor then performs all
+ *  subsequent RPCs on the connection using ugi.doAs(), so every action runs in the client user's context.
+ *  Note that old clients never call set_ugi(), so no UGI is received on the server side; in that
+ *  case the server exhibits the previous behavior and continues as usual.
+ */
+@SuppressWarnings("rawtypes")
+public class TUGIBasedProcessor<I extends Iface> extends TSetIpAddressProcessor<Iface> {
+
+  private final I iface;
+  private final Map<String,  org.apache.thrift.ProcessFunction<Iface, ? extends  TBase>>
+    functions;
+  static final Logger LOG = LoggerFactory.getLogger(TUGIBasedProcessor.class);
+
+  public TUGIBasedProcessor(I iface) throws SecurityException, NoSuchFieldException,
+    IllegalArgumentException, IllegalAccessException, NoSuchMethodException,
+    InvocationTargetException {
+    super(iface);
+    this.iface = iface;
+    this.functions = getProcessMapView();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public boolean process(final TProtocol in, final TProtocol out) throws TException {
+    setIpAddress(in);
+
+    final TMessage msg = in.readMessageBegin();
+    final ProcessFunction<Iface, ? extends  TBase> fn = functions.get(msg.name);
+    if (fn == null) {
+      TProtocolUtil.skip(in, TType.STRUCT);
+      in.readMessageEnd();
+      TApplicationException x = new TApplicationException(TApplicationException.UNKNOWN_METHOD,
+          "Invalid method name: '"+msg.name+"'");
+      out.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
+      x.write(out);
+      out.writeMessageEnd();
+      out.getTransport().flush();
+      return true;
+    }
+    TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
+    // Store ugi in transport if the rpc is set_ugi
+    if (msg.name.equalsIgnoreCase("set_ugi")){
+      try {
+        handleSetUGI(ugiTrans, (ThriftHiveMetastore.Processor.set_ugi<Iface>)fn, msg, in, out);
+      } catch (TException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new TException(e.getCause());
+      }
+      return true;
+    }
+    UserGroupInformation clientUgi = ugiTrans.getClientUGI();
+    if (null == clientUgi){
+      // At this point the transport should contain the client UGI; if it doesn't, it's an old client.
+      fn.process(msg.seqid, in, out, iface);
+      return true;
+    } else { // Found ugi, perform doAs().
+      PrivilegedExceptionAction<Void> pvea = new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() {
+          try {
+            fn.process(msg.seqid,in, out, iface);
+            return null;
+          } catch (TException te) {
+            throw new RuntimeException(te);
+          }
+        }
+      };
+      try {
+        clientUgi.doAs(pvea);
+        return true;
+      } catch (RuntimeException rte) {
+        if (rte.getCause() instanceof TException) {
+          throw (TException)rte.getCause();
+        }
+        throw rte;
+      } catch (InterruptedException ie) {
+        throw new RuntimeException(ie); // unexpected!
+      } catch (IOException ioe) {
+        throw new RuntimeException(ioe); // unexpected!
+      } finally {
+          try {
+            FileSystem.closeAllForUGI(clientUgi);
+          } catch (IOException e) {
+            LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, e);
+          }
+      }
+    }
+  }
+
+  private void handleSetUGI(TUGIContainingTransport ugiTrans,
+                            ThriftHiveMetastore.Processor.set_ugi<Iface> fn, TMessage msg, TProtocol iprot, TProtocol oprot)
+      throws TException, SecurityException, NoSuchMethodException, IllegalArgumentException,
+      IllegalAccessException, InvocationTargetException{
+
+    UserGroupInformation clientUgi = ugiTrans.getClientUGI();
+    if( null != clientUgi){
+      throw new TException(new IllegalStateException("UGI is already set. Resetting is not " +
+      "allowed. Current ugi is: " + clientUgi.getUserName()));
+    }
+
+    set_ugi_args args = fn.getEmptyArgsInstance();
+    try {
+      args.read(iprot);
+    } catch (TProtocolException e) {
+      iprot.readMessageEnd();
+      TApplicationException x = new TApplicationException(TApplicationException.PROTOCOL_ERROR,
+          e.getMessage());
+      oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.EXCEPTION, msg.seqid));
+      x.write(oprot);
+      oprot.writeMessageEnd();
+      oprot.getTransport().flush();
+      return;
+    }
+    iprot.readMessageEnd();
+    set_ugi_result result = fn.getResult(iface, args);
+    List<String> principals = result.getSuccess();
+    // Store the ugi in transport and then continue as usual.
+    ugiTrans.setClientUGI(UserGroupInformation.createRemoteUser(principals.remove(principals.size()-1)));
+    oprot.writeMessageBegin(new TMessage(msg.name, TMessageType.REPLY, msg.seqid));
+    result.write(oprot);
+    oprot.writeMessageEnd();
+    oprot.getTransport().flush();
+  }
+
+  @Override
+  protected void setIpAddress(final TProtocol in) {
+    TUGIContainingTransport ugiTrans = (TUGIContainingTransport)in.getTransport();
+    Socket socket = ugiTrans.getSocket();
+    if (socket != null) {
+      setIpAddress(socket);
+    }
+  }
+}
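
A minimal standalone sketch of the doAs() mechanism the processor relies on, assuming the caller already knows the remote user name; the user "alice" and the class name are placeholders:

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsExample {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
      // Filesystem and metastore operations issued here are attributed to "alice".
      System.out.println("Running as "
          + UserGroupInformation.getCurrentUser().getShortUserName());
      return null;
    });
  }
}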

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index f8ebc12..eb33d1f 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hive.metastore.conf;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader;
+import org.apache.hadoop.hive.metastore.HiveAlterHandler;
 import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
 import org.apache.hadoop.hive.metastore.utils.StringUtils;
 import org.slf4j.Logger;
@@ -99,6 +101,7 @@ public class MetastoreConf {
    */
   public static final MetastoreConf.ConfVars[] metaVars = {
       ConfVars.WAREHOUSE,
+      ConfVars.REPLDIR,
       ConfVars.THRIFT_URIS,
       ConfVars.SERVER_PORT,
       ConfVars.THRIFT_CONNECTION_RETRIES,
@@ -239,7 +242,7 @@ public class MetastoreConf {
         "hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS,
         "Number of seconds for a cached node to be active in the cache before they become stale."),
     ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl",
-        "org.apache.hadoop.hive.metastore.HiveAlterHandler",
+        HiveAlterHandler.class.getName(),
         "Alter handler.  For now defaults to the Hive one.  Really need a better default option"),
     ASYNC_LOG_ENABLED("metastore.async.log.enabled", "hive.async.log.enabled", true,
         "Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
@@ -279,6 +282,10 @@ public class MetastoreConf {
     CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay",
         "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS,
         "Number of seconds for the client to wait between consecutive connection attempts"),
+    CLIENT_KERBEROS_PRINCIPAL("metastore.client.kerberos.principal",
+        "hive.metastore.client.kerberos.principal",
+        "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM".
+        "The Kerberos principal associated with the HA cluster of hcat_servers."),
     CLIENT_SOCKET_LIFETIME("metastore.client.socket.lifetime",
         "hive.metastore.client.socket.lifetime", 0, TimeUnit.SECONDS,
         "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
@@ -439,6 +446,10 @@ public class MetastoreConf {
         "hive.metastore.event.message.factory",
         "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
         "Factory class for making encoding and decoding messages in the events generated."),
+    EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth",
+        "hive.metastore.event.db.notification.api.auth", true,
+        "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
+            "If set to true, then only the superusers in proxy settings have the permission"),
     EXECUTE_SET_UGI("metastore.execute.setugi", "hive.metastore.execute.setugi", true,
         "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
             "the client's reported user and group permissions. Note that this property must be set on \n" +
@@ -576,6 +587,8 @@ public class MetastoreConf {
         "Inteval for cmroot cleanup thread."),
     REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false,
         "Turn on ChangeManager, so delete files will go to cmrootdir."),
+    REPLDIR("metastore.repl.rootdir", "hive.repl.rootdir", "/user/hive/repl/",
+        "HDFS root dir for all replication dumps."),
     REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles",
         "hive.exec.copyfile.maxnumfiles", 1L,
         "Maximum number of files Hive uses to do sequential HDFS copies between directories." +
@@ -584,6 +597,10 @@ public class MetastoreConf {
         "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
         "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
             "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
+    REPL_DUMPDIR_CLEAN_FREQ("metastore.repl.dumpdir.clean.freq", "hive.repl.dumpdir.clean.freq",
+        0, TimeUnit.SECONDS, "Frequency at which timer task runs to purge expired dump dirs."),
+    REPL_DUMPDIR_TTL("metastore.repl.dumpdir.ttl", "hive.repl.dumpdir.ttl", 7, TimeUnit.DAYS,
+        "TTL of dump dirs before cleanup."),
     SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class",
         "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
         "Fully qualified class name for the metastore schema information class \n"
@@ -646,6 +663,10 @@ public class MetastoreConf {
         "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
     STATS_DEFAULT_PUBLISHER("metastore.stats.default.publisher", "hive.stats.default.publisher", "",
         "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
+    STORAGE_SCHEMA_READER_IMPL("metastore.storage.schema.reader.impl", NO_SUCH_KEY,
+        DefaultStorageSchemaReader.class.getName(),
+        "The class to use to read schemas from storage.  It must implement " +
+        "org.apache.hadoop.hive.metastore.StorageSchemaReader"),
     STORE_MANAGER_TYPE("datanucleus.storeManagerType", "datanucleus.storeManagerType", "rdbms", "metadata store type"),
     SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("metastore.support.special.characters.tablename",
         "hive.support.special.characters.tablename", true,
@@ -920,6 +941,10 @@ public class MetastoreConf {
       return hiveName;
     }
 
+    public Object getDefaultVal() {
+      return defaultVal;
+    }
+
     @Override
     public String toString() {
       return varname;
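
A short sketch of reading the values added above through MetastoreConf, mirroring how DumpDirCleanerTask later in this patch consumes them; it assumes the MetastoreConf.newMetastoreConf() factory and is illustrative only:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

public class ConfReadExample {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    // New replication settings introduced in this change.
    String replRoot = MetastoreConf.getVar(conf, ConfVars.REPLDIR);
    long dumpTtlMs = MetastoreConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_TTL,
        TimeUnit.MILLISECONDS);
    // getDefaultVal() is the accessor added at the bottom of this hunk.
    Object readerDefault = ConfVars.STORAGE_SCHEMA_READER_IMPL.getDefaultVal();
    System.out.println(replRoot + ", " + dumpTtlMs + " ms, " + readerDefault);
  }
}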

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
index 230c0d3..cc9cc9d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/EventCleanerTask.java
@@ -18,14 +18,12 @@
 
 package org.apache.hadoop.hive.metastore.events;
 
-import java.util.TimerTask;
-
 import org.apache.hadoop.hive.metastore.IHMSHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.metastore.RawStore;
 
-public class EventCleanerTask extends TimerTask{
+public class EventCleanerTask implements Runnable {
 
   public static final Logger LOG = LoggerFactory.getLogger(EventCleanerTask.class);
   private final IHMSHandler handler;
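
Switching from TimerTask to Runnable presumably lets the task be driven by any executor rather than a java.util.Timer. A minimal sketch of scheduling a Runnable with a ScheduledExecutorService; the task body below is a placeholder, not the real cleaner:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduleExample {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    Runnable task = () -> System.out.println("purging expired events");
    // Run immediately, then once a minute.
    pool.scheduleAtFixedRate(task, 0, 60, TimeUnit.SECONDS);
  }
}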

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
index e031dbb..56eb9ed 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/events/ListenerEvent.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hive.metastore.events;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 
@@ -100,6 +101,15 @@ public abstract class ListenerEvent {
   }
 
   /**
+   * You should use {@link #getIHMSHandler()} instead.
+   * @return handler.
+   */
+  @Deprecated
+  public HiveMetaStore.HMSHandler getHandler() {
+    return (HiveMetaStore.HMSHandler)handler;
+  }
+
+  /**
    * @return the handler
    */
   public IHMSHandler getIHMSHandler() {
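
A small sketch of migrating listener code off the deprecated accessor; the class below is illustrative and stands in for code inside an existing metastore event listener:

import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.events.ListenerEvent;

public class ListenerMigrationExample {
  void handle(ListenerEvent event) {
    // Prefer the interface-typed accessor; getHandler() is deprecated by this patch
    // and casts to the concrete HMSHandler.
    IHMSHandler handler = event.getIHMSHandler();
    System.out.println("Handler class: " + handler.getClass().getName());
  }
}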

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
index 65084bd..eefb505 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/hooks/JDOConnectionURLHook.java
@@ -40,8 +40,7 @@ public interface JDOConnectionURLHook {
    * @return the connection URL
    * @throws Exception
    */
-  public String getJdoConnectionUrl(Configuration conf)
-  throws Exception;
+  String getJdoConnectionUrl(Configuration conf) throws Exception;
 
   /**
    * Alerts this that the connection URL was bad. Can be used to collect stats,
@@ -49,5 +48,5 @@ public interface JDOConnectionURLHook {
    *
    * @param url
    */
-  public void notifyBadConnectionUrl(String url);
+  void notifyBadConnectionUrl(String url);
 }
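
A hypothetical implementation of the hook, purely to illustrate the contract above; the system property name is made up:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;

public class SystemPropertyUrlHook implements JDOConnectionURLHook {
  @Override
  public String getJdoConnectionUrl(Configuration conf) throws Exception {
    // Hand back whatever URL is currently advertised, e.g. by a failover manager.
    return System.getProperty("example.jdo.connection.url");
  }

  @Override
  public void notifyBadConnectionUrl(String url) {
    System.err.println("Bad JDO connection URL reported: " + url);
  }
}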

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
new file mode 100644
index 0000000..46abb4b
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/repl/DumpDirCleanerTask.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.repl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.RunnableConfigurable;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.concurrent.TimeUnit;
+
+public class DumpDirCleanerTask implements RunnableConfigurable {
+  public static final Logger LOG = LoggerFactory.getLogger(DumpDirCleanerTask.class);
+  private Configuration conf;
+  private Path dumpRoot;
+  private long ttl;
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+    dumpRoot = new Path(MetastoreConf.getVar(conf, ConfVars.REPLDIR));
+    ttl = MetastoreConf.getTimeVar(conf, ConfVars.REPL_DUMPDIR_TTL, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  @Override
+  public void run() {
+    LOG.debug("Trying to delete old dump dirs");
+    try {
+      FileSystem fs = FileSystem.get(dumpRoot.toUri(), conf);
+      FileStatus[] statuses = fs.listStatus(dumpRoot);
+      for (FileStatus status : statuses)
+      {
+        if (status.getModificationTime() < System.currentTimeMillis() - ttl)
+        {
+          fs.delete(status.getPath(), true);
+          LOG.info("Deleted old dump dir: " + status.getPath());
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("Error while trying to delete dump dir", e);
+    }
+  }
+}
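
A sketch of driving the cleaner by hand, for example from a test; in the server it is presumably scheduled periodically, but that wiring is outside this hunk. Assumes MetastoreConf.newMetastoreConf():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask;

public class DumpDirCleanerExample {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    DumpDirCleanerTask task = new DumpDirCleanerTask();
    task.setConf(conf); // picks up REPLDIR and REPL_DUMPDIR_TTL
    task.run();         // deletes dump dirs whose mtime is older than the TTL
  }
}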

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
new file mode 100644
index 0000000..24e4ebe
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/CommonCliOptions.java
@@ -0,0 +1,160 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import java.util.Properties;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.logging.log4j.Level;
+
+/**
+ * Reusable code for Hive Cli's.
+ * <p>
+ * Basic usage is: create an instance (usually a subclass if you want to
+ * add your own options or processing instructions), parse, and then use
+ * the resulting information.
+ * <p>
+ * See org.apache.hadoop.hive.service.HiveServer or
+ *     org.apache.hadoop.hive.metastore.HiveMetaStore
+ *     for examples of use.
+ *
+ */
+public class CommonCliOptions {
+  /**
+   * Options for parsing the command line.
+   */
+  protected final Options OPTIONS = new Options();
+
+  protected CommandLine commandLine;
+
+  /**
+   * The name of this cli.
+   */
+  protected final String cliname;
+
+  private boolean verbose = false;
+
+  /**
+   * Create an instance with common options (help, verbose, etc...).
+   *
+   * @param cliname the name of the command
+   * @param includeHiveConf include "hiveconf" as an option if true
+   */
+  @SuppressWarnings("static-access")
+  public CommonCliOptions(String cliname, boolean includeHiveConf) {
+    this.cliname = cliname;
+
+    // [-v|--verbose]
+    OPTIONS.addOption(new Option("v", "verbose", false, "Verbose mode"));
+
+    // [-h|--help]
+    OPTIONS.addOption(new Option("h", "help", false, "Print help information"));
+
+    if (includeHiveConf) {
+      OPTIONS.addOption(OptionBuilder
+          .withValueSeparator()
+          .hasArgs(2)
+          .withArgName("property=value")
+          .withLongOpt("hiveconf")
+          .withDescription("Use value for given property")
+          .create());
+    }
+  }
+
+  /**
+   * Add the hiveconf properties to the Java system properties, overriding
+   * anything already set there.
+   *
+   * @return a copy of the properties specified in hiveconf
+   */
+  public Properties addHiveconfToSystemProperties() {
+    Properties confProps = commandLine.getOptionProperties("hiveconf");
+    for (String propKey : confProps.stringPropertyNames()) {
+      if (verbose) {
+        System.err.println(
+            "hiveconf: " + propKey + "=" + confProps.getProperty(propKey));
+      }
+      if (propKey.equalsIgnoreCase("hive.root.logger")) {
+        splitAndSetLogger(propKey, confProps);
+      } else {
+        System.setProperty(propKey, confProps.getProperty(propKey));
+      }
+    }
+    return confProps;
+  }
+
+  public static void splitAndSetLogger(final String propKey, final Properties confProps) {
+    String propVal = confProps.getProperty(propKey);
+    if (propVal.contains(",")) {
+      String[] tokens = propVal.split(",");
+      for (String token : tokens) {
+        if (Level.getLevel(token) == null) {
+          System.setProperty("hive.root.logger", token);
+        } else {
+          System.setProperty("hive.log.level", token);
+        }
+      }
+    } else {
+      System.setProperty(propKey, confProps.getProperty(propKey));
+    }
+  }
+
+  /**
+   * Print usage information for the CLI.
+   */
+  public void printUsage() {
+    new HelpFormatter().printHelp(cliname, OPTIONS);
+  }
+
+  /**
+   * Parse the arguments.
+   * @param args
+   */
+  public void parse(String[] args) {
+    try {
+      commandLine = new GnuParser().parse(OPTIONS, args);
+
+      if (commandLine.hasOption('h')) {
+        printUsage();
+        System.exit(1);
+      }
+      if (commandLine.hasOption('v')) {
+        verbose = true;
+      }
+    } catch (ParseException e) {
+      System.err.println(e.getMessage());
+      printUsage();
+      System.exit(1);
+    }
+
+  }
+
+  /**
+   * Should the client be verbose.
+   */
+  public boolean isVerbose() {
+    return verbose;
+  }
+
+}
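
A small sketch of the "create, parse, use" flow described in the class comment, using the base class directly rather than a subclass; the CLI name is a placeholder:

import java.util.Properties;

import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;

public class CliExample {
  public static void main(String[] args) {
    CommonCliOptions cli = new CommonCliOptions("example-cli", true);
    cli.parse(args); // exits on -h/--help or on a parse error
    Properties hiveconf = cli.addHiveconfToSystemProperties();
    if (cli.isVerbose()) {
      System.err.println("Applied " + hiveconf.size() + " hiveconf overrides");
    }
  }
}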

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
index 2dac899..c9deccc 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/FileUtils.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.slf4j.Logger;
@@ -37,6 +38,12 @@ import java.util.Collections;
 import java.util.List;
 
 public class FileUtils {
+  private static final PathFilter SNAPSHOT_DIR_PATH_FILTER = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      return ".snapshot".equalsIgnoreCase(p.getName());
+    }
+  };
   private static final Logger LOG = LoggerFactory.getLogger(FileUtils.class);
 
   public static final PathFilter HIDDEN_FILES_PATH_FILTER = new PathFilter() {
@@ -379,4 +386,30 @@ public class FileUtils {
     //Once equality has been added in HDFS-9159, we should make use of it
     return fs1.getUri().equals(fs2.getUri());
   }
+
+  /**
+   * Check if the path contains a subdirectory named '.snapshot'
+   * @param p path to check
+   * @param fs filesystem of the path
+   * @return true if p contains a subdirectory named '.snapshot'
+   * @throws IOException
+   */
+  public static boolean pathHasSnapshotSubDir(Path p, FileSystem fs) throws IOException {
+    // Hadoop is missing a public API to check for snapshotable directories. Check with the directory name
+    // until a more appropriate API is provided by HDFS-12257.
+    final FileStatus[] statuses = fs.listStatus(p, FileUtils.SNAPSHOT_DIR_PATH_FILTER);
+    return statuses != null && statuses.length != 0;
+  }
+
+  public static void makeDir(Path path, Configuration conf) throws MetaException {
+    FileSystem fs;
+    try {
+      fs = path.getFileSystem(conf);
+      if (!fs.exists(path)) {
+        fs.mkdirs(path);
+      }
+    } catch (IOException e) {
+      throw new MetaException("Unable to : " + path);
+    }
+  }
 }
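
A brief sketch of the two helpers added above; the path used here is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.utils.FileUtils;

public class FileUtilsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path dir = new Path("/tmp/metastore-example");
    FileUtils.makeDir(dir, conf); // create the directory if it does not exist
    FileSystem fs = dir.getFileSystem(conf);
    // True only if dir has an immediate child named ".snapshot".
    System.out.println("has .snapshot subdir: " + FileUtils.pathHasSnapshotSubDir(dir, fs));
  }
}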

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
index ecbddc3..f494a8e 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
@@ -18,10 +18,20 @@
 package org.apache.hadoop.hive.metastore.utils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Objects;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -39,6 +49,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -218,4 +229,166 @@ public class HdfsUtils {
     }
     return (DistributedFileSystem)fs;
   }
+
+  public static class HadoopFileStatus {
+
+    private final FileStatus fileStatus;
+    private final AclStatus aclStatus;
+
+    public HadoopFileStatus(Configuration conf, FileSystem fs, Path file) throws IOException {
+
+      FileStatus fileStatus = fs.getFileStatus(file);
+      AclStatus aclStatus = null;
+      if (Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true")) {
+        // Attempt extended ACL operations only if it's enabled, but don't fail the operation regardless.
+        try {
+          aclStatus = fs.getAclStatus(file);
+        } catch (Exception e) {
+          LOG.info("Skipping ACL inheritance: File system for path " + file + " " +
+              "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+          LOG.debug("The details are: " + e, e);
+        }
+      }
+      this.fileStatus = fileStatus;
+      this.aclStatus = aclStatus;
+    }
+
+    public FileStatus getFileStatus() {
+      return fileStatus;
+    }
+
+    List<AclEntry> getAclEntries() {
+      return aclStatus == null ? null : Collections.unmodifiableList(aclStatus.getEntries());
+    }
+
+    @VisibleForTesting
+    AclStatus getAclStatus() {
+      return this.aclStatus;
+    }
+  }
+
+  /**
+   * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
+   * will only log a warning if permissions cannot be set, no exception will be thrown.
+   *
+   * @param conf the {@link Configuration} used when setting permissions and ACLs
+   * @param sourceStatus the source {@link HadoopFileStatus} to copy permissions and ACLs from
+   * @param targetGroup the group of the target {@link Path}, if this is set and it is equal to the source group, an
+   *                    extra set group operation is avoided
+   * @param fs the {@link FileSystem} that contains the target {@link Path}
+   * @param target the {@link Path} to copy permissions, group, and ACLs to
+   * @param recursion recursively set permissions and ACLs on the target {@link Path}
+   */
+  public static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+                                       String targetGroup, FileSystem fs, Path target, boolean recursion) {
+    setFullFileStatus(conf, sourceStatus, targetGroup, fs, target, recursion, recursion ? new FsShell() : null);
+  }
+
+  @VisibleForTesting
+  static void setFullFileStatus(Configuration conf, HdfsUtils.HadoopFileStatus sourceStatus,
+                                String targetGroup, FileSystem fs, Path target, boolean recursion, FsShell fsShell) {
+    try {
+      FileStatus fStatus = sourceStatus.getFileStatus();
+      String group = fStatus.getGroup();
+      boolean aclEnabled = Objects.equal(conf.get("dfs.namenode.acls.enabled"), "true");
+      FsPermission sourcePerm = fStatus.getPermission();
+      List<AclEntry> aclEntries = null;
+      if (aclEnabled) {
+        if (sourceStatus.getAclEntries() != null) {
+          LOG.trace(sourceStatus.getAclStatus().toString());
+          aclEntries = new ArrayList<>(sourceStatus.getAclEntries());
+          removeBaseAclEntries(aclEntries);
+
+          // the ACL APIs also expect the traditional user/group/other permissions in the form of ACL entries
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.USER, sourcePerm.getUserAction()));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.GROUP, sourcePerm.getGroupAction()));
+          aclEntries.add(newAclEntry(AclEntryScope.ACCESS, AclEntryType.OTHER, sourcePerm.getOtherAction()));
+        }
+      }
+
+      if (recursion) {
+        //use FsShell to change group, permissions, and extended ACL's recursively
+        fsShell.setConf(conf);
+        //If there is no group of a file, no need to call chgrp
+        if (group != null && !group.isEmpty()) {
+          run(fsShell, new String[]{"-chgrp", "-R", group, target.toString()});
+        }
+        if (aclEnabled) {
+          if (null != aclEntries) {
+              // Attempt extended ACL operations only if it's enabled, but don't fail the operation regardless.
+            try {
+              //construct the -setfacl command
+              String aclEntry = Joiner.on(",").join(aclEntries);
+              run(fsShell, new String[]{"-setfacl", "-R", "--set", aclEntry, target.toString()});
+
+            } catch (Exception e) {
+              LOG.info("Skipping ACL inheritance: File system for path " + target + " " +
+                  "does not support ACLs but dfs.namenode.acls.enabled is set to true. ");
+              LOG.debug("The details are: " + e, e);
+            }
+          }
+        } else {
+          String permission = Integer.toString(sourcePerm.toShort(), 8);
+          run(fsShell, new String[]{"-chmod", "-R", permission, target.toString()});
+        }
+      } else {
+        if (group != null && !group.isEmpty()) {
+          if (targetGroup == null ||
+              !group.equals(targetGroup)) {
+            fs.setOwner(target, null, group);
+          }
+        }
+        if (aclEnabled) {
+          if (null != aclEntries) {
+            fs.setAcl(target, aclEntries);
+          }
+        } else {
+          fs.setPermission(target, sourcePerm);
+        }
+      }
+    } catch (Exception e) {
+      LOG.warn(
+          "Unable to inherit permissions for file " + target + " from file " + sourceStatus.getFileStatus().getPath(),
+          e.getMessage());
+      LOG.debug("Exception while inheriting permissions", e);
+    }
+  }
+
+  /**
+   * Removes basic permission ACLs (unnamed ACLs) from the list of ACL entries.
+   * @param entries acl entries to remove from.
+   */
+  private static void removeBaseAclEntries(List<AclEntry> entries) {
+    Iterables.removeIf(entries, new Predicate<AclEntry>() {
+      @Override
+      public boolean apply(AclEntry input) {
+        if (input.getName() == null) {
+          return true;
+        }
+        return false;
+      }
+    });
+  }
+
+  /**
+   * Create a new AclEntry with scope, type and permission (no name).
+   *
+   * @param scope
+   *          AclEntryScope scope of the ACL entry
+   * @param type
+   *          AclEntryType ACL entry type
+   * @param permission
+   *          FsAction set of permissions in the ACL entry
+   * @return AclEntry new AclEntry
+   */
+  private static AclEntry newAclEntry(AclEntryScope scope, AclEntryType type,
+                                      FsAction permission) {
+    return new AclEntry.Builder().setScope(scope).setType(type)
+        .setPermission(permission).build();
+  }
+
+  private static void run(FsShell shell, String[] command) throws Exception {
+    LOG.debug(ArrayUtils.toString(command));
+    int retval = shell.run(command);
+    LOG.debug("Return value is :" + retval);
+  }
 }
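
A sketch of copying group, permission bits, and (when enabled) ACLs from one path to another with the helpers added above; the warehouse paths are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.utils.HdfsUtils;

public class InheritPermsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path source = new Path("/warehouse/db.db/table");
    Path target = new Path("/warehouse/db.db/table/part=1");
    FileSystem fs = source.getFileSystem(conf);
    HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(conf, fs, source);
    // Non-recursive copy; failures are logged as warnings, not thrown.
    HdfsUtils.setFullFileStatus(conf, status, null, fs, target, false);
  }
}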

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
new file mode 100644
index 0000000..06fe6cb
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/LogUtils.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.utils;
+
+import java.io.File;
+import java.net.URL;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.core.config.Configurator;
+import org.apache.logging.log4j.core.impl.Log4jContextFactory;
+import org.apache.logging.log4j.spi.DefaultThreadContextMap;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utilities common to logging operations.
+ */
+public class LogUtils {
+
+  private static final String HIVE_L4J = "hive-log4j2.properties";
+  private static final Logger l4j = LoggerFactory.getLogger(LogUtils.class);
+
+  @SuppressWarnings("serial")
+  public static class LogInitializationException extends Exception {
+    LogInitializationException(String msg) {
+      super(msg);
+    }
+  }
+
+  /**
+   * Initialize log4j.
+   *
+   * @return a message suitable for display to the user
+   * @throws LogInitializationException if log4j fails to initialize correctly
+   */
+  public static String initHiveLog4j(Configuration conf)
+    throws LogInitializationException {
+    return initHiveLog4jCommon(conf, MetastoreConf.ConfVars.LOG4J_FILE);
+  }
+
+  private static String initHiveLog4jCommon(Configuration conf, ConfVars confVarName)
+    throws LogInitializationException {
+    if (MetastoreConf.getVar(conf, confVarName).equals("")) {
+      // if the log4j configuration file is not set, or cannot be found, use the default setting
+      return initHiveLog4jDefault(conf, "", confVarName);
+    } else {
+      // if log4j configuration file found successfully, use HiveConf property value
+      String log4jFileName = MetastoreConf.getVar(conf, confVarName);
+      File log4jConfigFile = new File(log4jFileName);
+      boolean fileExists = log4jConfigFile.exists();
+      if (!fileExists) {
+        // if property specified file not found in local file system
+        // use default setting
+        return initHiveLog4jDefault(
+          conf, "Not able to find conf file: " + log4jConfigFile, confVarName);
+      } else {
+        // the file specified by the property was found on the local file system
+        // use the specified file
+        final boolean async = checkAndSetAsyncLogging(conf);
+        // required for MDC based routing appender so that child threads can inherit the MDC context
+        System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
+        Configurator.initialize(null, log4jFileName);
+        logConfigLocation();
+        return "Logging initialized using configuration in " + log4jConfigFile + " Async: " + async;
+      }
+    }
+  }
+
+  private static boolean checkAndSetAsyncLogging(final Configuration conf) {
+    final boolean asyncLogging = MetastoreConf.getBoolVar(conf, ConfVars.ASYNC_LOG_ENABLED);
+    if (asyncLogging) {
+      System.setProperty("Log4jContextSelector",
+          "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector");
+      // default is ClassLoaderContextSelector which is created during automatic logging
+      // initialization in a static initialization block.
+      // Changing ContextSelector at runtime requires creating new context factory which will
+      // internally create new context selector based on system property.
+      LogManager.setFactory(new Log4jContextFactory());
+    }
+    return asyncLogging;
+  }
+
+  private static String initHiveLog4jDefault(Configuration conf, String logMessage, ConfVars confVarName)
+    throws LogInitializationException {
+    URL hive_l4j = null;
+    switch (confVarName) {
+      case LOG4J_FILE:
+        hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J);
+        break;
+      default:
+        break;
+    }
+    if (hive_l4j != null) {
+      final boolean async = checkAndSetAsyncLogging(conf);
+      System.setProperty(DefaultThreadContextMap.INHERITABLE_MAP, "true");
+      Configurator.initialize(null, hive_l4j.toString());
+      logConfigLocation();
+      return (logMessage + "\n" + "Logging initialized using configuration in " + hive_l4j +
+          " Async: " + async);
+    } else {
+      throw new LogInitializationException(
+        logMessage + "Unable to initialize logging using "
+        + LogUtils.HIVE_L4J + ", not found on CLASSPATH!");
+    }
+  }
+
+  private static void logConfigLocation() throws LogInitializationException {
+    // Log a warning if hive-default.xml is found on the classpath
+    if (MetastoreConf.getHiveDefaultLocation() != null) {
+      l4j.warn("DEPRECATED: Ignoring hive-default.xml found on the CLASSPATH at "
+        + MetastoreConf.getHiveDefaultLocation().getPath());
+    }
+    // Look for hive-site.xml on the CLASSPATH and log its location if found.
+    if (MetastoreConf.getHiveSiteLocation() == null) {
+      l4j.warn("hive-site.xml not found on CLASSPATH");
+    } else {
+      l4j.debug("Using hive-site.xml found on CLASSPATH at "
+        + MetastoreConf.getHiveSiteLocation().getPath());
+    }
+  }
+}
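
A sketch of initializing metastore logging through the utility above; it assumes MetastoreConf.newMetastoreConf() and that hive-log4j2.properties is on the classpath or LOG4J_FILE points at a file:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
import org.apache.hadoop.hive.metastore.utils.LogUtils;

public class LogInitExample {
  public static void main(String[] args) {
    Configuration conf = MetastoreConf.newMetastoreConf();
    try {
      // Returns a human-readable message describing which config file was used.
      System.err.println(LogUtils.initHiveLog4j(conf));
    } catch (LogUtils.LogInitializationException e) {
      System.err.println("Logging not initialized: " + e.getMessage());
    }
  }
}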

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
index 50e4244..bf25e50 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java
@@ -22,9 +22,12 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.commons.collections.ListUtils;
-import org.apache.commons.lang.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.ColumnType;
 import org.apache.hadoop.hive.metastore.TableType;
@@ -35,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
@@ -45,26 +49,37 @@ import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
 import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
 import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
+import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMerger;
+import org.apache.hadoop.hive.metastore.columnstats.merge.ColumnStatsMergerFactory;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.MachineList;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.annotation.Nullable;
+import java.io.File;
+import java.lang.reflect.InvocationTargetException;
 import java.math.BigDecimal;
 import java.math.BigInteger;
+import java.net.URL;
+import java.net.URLClassLoader;
 import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.security.MessageDigest;
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.SortedMap;
 import java.util.SortedSet;
 import java.util.TreeMap;
@@ -206,12 +221,12 @@ public class MetaStoreUtils {
         singleObj.add(obj);
         ColumnStatistics singleCS = new ColumnStatistics(css.getStatsDesc(), singleObj);
         if (!map.containsKey(obj.getColName())) {
-          map.put(obj.getColName(), new ArrayList<ColumnStatistics>());
+          map.put(obj.getColName(), new ArrayList<>());
         }
         map.get(obj.getColName()).add(singleCS);
       }
     }
-    return MetaStoreUtils.aggrPartitionStats(map,dbName,tableName,partNames,colNames,useDensityFunctionForNDVEstimation, ndvTuner);
+    return aggrPartitionStats(map, dbName, tableName, partNames, colNames,
+        useDensityFunctionForNDVEstimation, ndvTuner);
   }
 
   public static List<ColumnStatisticsObj> aggrPartitionStats(
@@ -401,7 +416,7 @@ public class MetaStoreUtils {
    *              if it doesn't match the pattern.
    */
   public static boolean validateName(String name, Configuration conf) {
-    Pattern tpat = null;
+    Pattern tpat;
     String allowedCharacters = "\\w_";
     if (conf != null
         && MetastoreConf.getBoolVar(conf,
@@ -490,7 +505,7 @@ public class MetaStoreUtils {
       return false;
     }
 
-    if (MetaStoreUtils.isView(tbl)) {
+    if (isView(tbl)) {
       return false;
     }
 
@@ -602,7 +617,7 @@ public class MetaStoreUtils {
         params == null ||
         !containsAllFastStats(params)) {
       if (params == null) {
-        params = new HashMap<String,String>();
+        params = new HashMap<>();
       }
       if (!newDir) {
         // The table location already exists and may contain data.
@@ -701,7 +716,7 @@ public class MetaStoreUtils {
         params == null ||
         !containsAllFastStats(params)) {
       if (params == null) {
-        params = new HashMap<String,String>();
+        params = new HashMap<>();
       }
       if (!madeDir) {
         // The partition location already existed and may contain data. Lets try to
@@ -728,7 +743,7 @@ public class MetaStoreUtils {
       return false;
     }
 
-    Map<String, String> columnNameTypePairMap = new HashMap<String, String>(newCols.size());
+    Map<String, String> columnNameTypePairMap = new HashMap<>(newCols.size());
     for (FieldSchema newCol : newCols) {
       columnNameTypePairMap.put(newCol.getName().toLowerCase(), newCol.getType());
     }
@@ -747,4 +762,288 @@ public class MetaStoreUtils {
     String transactionalProp = params.get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
     return (transactionalProp != null && "insert_only".equalsIgnoreCase(transactionalProp));
   }
+
+  /**
+   * Create listener instances as per the configuration.
+   *
+   * @param clazz Class of the listener
+   * @param conf configuration object passed to each listener's constructor
+   * @param listenerImplList comma-separated list of listener implementation class names
+   * @return list of instantiated listeners
+   * @throws MetaException if any listener class cannot be instantiated
+   */
+  public static <T> List<T> getMetaStoreListeners(Class<T> clazz,
+      Configuration conf, String listenerImplList) throws MetaException {
+    List<T> listeners = new ArrayList<>();
+
+    if (StringUtils.isBlank(listenerImplList)) {
+      return listeners;
+    }
+
+    String[] listenerImpls = listenerImplList.split(",");
+    for (String listenerImpl : listenerImpls) {
+      try {
+        T listener = (T) Class.forName(
+            listenerImpl.trim(), true, JavaUtils.getClassLoader()).getConstructor(
+                Configuration.class).newInstance(conf);
+        listeners.add(listener);
+      } catch (InvocationTargetException ie) {
+        throw new MetaException("Failed to instantiate listener named: "+
+            listenerImpl + ", reason: " + ie.getCause());
+      } catch (Exception e) {
+        throw new MetaException("Failed to instantiate listener named: "+
+            listenerImpl + ", reason: " + e);
+      }
+    }
+
+    return listeners;
+  }
+
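+  /**
+   * Validate that each skewed column name is a legal column name.
+   * @param cols skewed column names, may be null or empty
+   * @return the first invalid column name, or null if all names are valid
+   */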
+  public static String validateSkewedColNames(List<String> cols) {
+    if (CollectionUtils.isEmpty(cols)) {
+      return null;
+    }
+    for (String col : cols) {
+      if (!validateColumnName(col)) {
+        return col;
+      }
+    }
+    return null;
+  }
+
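+  /**
+   * Check that every skewed column name refers to an actual table column.
+   * @param skewedColNames skewed column names, may be null or empty
+   * @param cols the table's columns
+   * @return a string listing the skewed column names that are not table columns,
+   *         or null if all of them are valid
+   */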
+  public static String validateSkewedColNamesSubsetCol(List<String> skewedColNames,
+      List<FieldSchema> cols) {
+    if (CollectionUtils.isEmpty(skewedColNames)) {
+      return null;
+    }
+    List<String> colNames = new ArrayList<>(cols.size());
+    for (FieldSchema fieldSchema : cols) {
+      colNames.add(fieldSchema.getName());
+    }
+    // make a copy
+    List<String> copySkewedColNames = new ArrayList<>(skewedColNames);
+    // remove valid columns
+    copySkewedColNames.removeAll(colNames);
+    if (copySkewedColNames.isEmpty()) {
+      return null;
+    }
+    return copySkewedColNames.toString();
+  }
+
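+  /**
+   * @return true if the table is backed by a storage handler, i.e. its
+   *         META_TABLE_STORAGE parameter is set; false otherwise
+   */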
+  public static boolean isNonNativeTable(Table table) {
+    if (table == null || table.getParameters() == null) {
+      return false;
+    }
+    return (table.getParameters().get(hive_metastoreConstants.META_TABLE_STORAGE) != null);
+  }
+
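+  /**
+   * @return true if the table's type is INDEX_TABLE, false otherwise
+   */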
+  public static boolean isIndexTable(Table table) {
+    if (table == null) {
+      return false;
+    }
+    return TableType.INDEX_TABLE.toString().equals(table.getTableType());
+  }
+
+  /**
+   * Given the list of a table's partition columns and a partial mapping from
+   * partition columns to values, return the value for each partition column.
+   * @param partCols the list of table partition columns
+   * @param partSpec the partial mapping from partition column to value
+   * @return list of values for the given partition columns; any value missing
+   *         from partSpec is replaced by an empty string
+   */
+  public static List<String> getPvals(List<FieldSchema> partCols,
+                                      Map<String, String> partSpec) {
+    List<String> pvals = new ArrayList<>(partCols.size());
+    for (FieldSchema field : partCols) {
+      String val = StringUtils.defaultString(partSpec.get(field.getName()));
+      pvals.add(val);
+    }
+    return pvals;
+  }
+
+  /**
+   * Compare two schemas field by field, ignoring each field's comment.
+   * @param schema1 the first schema to be compared
+   * @param schema2 the second schema to be compared
+   * @return true if the two schemas are the same, false otherwise
+   */
+  public static boolean compareFieldColumns(List<FieldSchema> schema1, List<FieldSchema> schema2) {
+    if (schema1.size() != schema2.size()) {
+      return false;
+    }
+    Iterator<FieldSchema> its1 = schema1.iterator();
+    Iterator<FieldSchema> its2 = schema2.iterator();
+    while (its1.hasNext()) {
+      FieldSchema f1 = its1.next();
+      FieldSchema f2 = its2.next();
+      // The default equals provided by thrift compares the comments too for
+      // equality, thus we need to compare the relevant fields here.
+      if (!StringUtils.equals(f1.getName(), f2.getName()) ||
+          !StringUtils.equals(f1.getType(), f2.getType())) {
+        return false;
+      }
+    }
+    return true;
+  }
+
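+  /**
+   * @return true if the partition is marked as archived (its IS_ARCHIVED parameter is set
+   *         to true), false otherwise
+   */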
+  public static boolean isArchived(Partition part) {
+    Map<String, String> params = part.getParameters();
+    return "TRUE".equalsIgnoreCase(params.get(hive_metastoreConstants.IS_ARCHIVED));
+  }
+
+  public static Path getOriginalLocation(Partition part) {
+    Map<String, String> params = part.getParameters();
+    assert(isArchived(part));
+    String originalLocation = params.get(hive_metastoreConstants.ORIGINAL_LOCATION);
+    assert( originalLocation != null);
+
+    return new Path(originalLocation);
+  }
+
+  private static final String ARCHIVING_LEVEL = "archiving_level";
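+
+  /**
+   * Get the level (number of partition columns) at which an archived partition was archived.
+   * Partitions archived before multi-level archiving was introduced default to the number of
+   * partition values.
+   * @param part the partition, which must be archived
+   * @return the archiving level
+   * @throws MetaException if the partition is not archived
+   */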
+  public static int getArchivingLevel(Partition part) throws MetaException {
+    if (!isArchived(part)) {
+      throw new MetaException("Getting level of unarchived partition");
+    }
+
+    String lv = part.getParameters().get(ARCHIVING_LEVEL);
+    if (lv != null) {
+      return Integer.parseInt(lv);
+    }
+    // partitions archived before introducing multiple archiving
+    return part.getValues().size();
+  }
+
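+  /**
+   * @param partVals the partition values to check
+   * @param partitionValidationPattern pattern that partition values must match
+   * @return true if no partition value contains an invalid character, false otherwise
+   */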
+  public static boolean partitionNameHasValidCharacters(List<String> partVals,
+      Pattern partitionValidationPattern) {
+    return getPartitionValWithInvalidCharacter(partVals, partitionValidationPattern) == null;
+  }
+
+  /**
+   * Merge the column statistics in csOld into csNew. Columns present in both are merged;
+   * columns present only in csNew are kept as is.
+   */
+  public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld)
+      throws InvalidObjectException {
+    List<ColumnStatisticsObj> list = new ArrayList<>();
+    if (csNew.getStatsObj().size() != csOld.getStatsObjSize()) {
+      // Some of the columns' stats are missing
+      // This implies partition schema has changed. We will merge columns
+      // present in both, overwrite stats for columns absent in metastore and
+      // leave alone columns stats missing from stats task. This last case may
+      // leave stats in stale state. This will be addressed later.
+      LOG.debug("New ColumnStats size is {}, but old ColumnStats size is {}",
+          csNew.getStatsObj().size(), csOld.getStatsObjSize());
+    }
+    // In this case, we have to find out which columns can be merged.
+    Map<String, ColumnStatisticsObj> map = new HashMap<>();
+    // We build a hash map from colName to object for old ColumnStats.
+    for (ColumnStatisticsObj obj : csOld.getStatsObj()) {
+      map.put(obj.getColName(), obj);
+    }
+    for (int index = 0; index < csNew.getStatsObj().size(); index++) {
+      ColumnStatisticsObj statsObjNew = csNew.getStatsObj().get(index);
+      ColumnStatisticsObj statsObjOld = map.get(statsObjNew.getColName());
+      if (statsObjOld != null) {
+        // If statsObjOld is found, we can merge.
+        ColumnStatsMerger merger = ColumnStatsMergerFactory.getColumnStatsMerger(statsObjNew,
+            statsObjOld);
+        merger.merge(statsObjNew, statsObjOld);
+      }
+      list.add(statsObjNew);
+    }
+    csNew.setStatsObj(list);
+  }
+
+  /**
+   * Read and return the metastore SASL configuration. Currently it uses the default
+   * Hadoop SASL configuration, controlled by "hadoop.rpc.protection".
+   * Note that HADOOP-10211 made a backward-incompatible change, due to which this call
+   * does not work with Hadoop 2.4.0 and later.
+   * @param conf configuration to read the SASL properties from
+   * @param useSSL whether SSL is enabled; if so, SASL encryption is downgraded to authentication only
+   * @return the SASL properties map
+   */
+  public static Map<String, String> getMetaStoreSaslProperties(Configuration conf, boolean useSSL) {
+    // As of now Hive Meta Store uses the same configuration as Hadoop SASL configuration
+
+    // If SSL is enabled, override the given value of "hadoop.rpc.protection" and set it to "authentication"
+    // This disables any encryption provided by SASL, since SSL already provides it
+    String hadoopRpcProtectionVal = conf.get(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION);
+    String hadoopRpcProtectionAuth = SaslRpcServer.QualityOfProtection.AUTHENTICATION.toString();
+
+    if (useSSL && hadoopRpcProtectionVal != null && !hadoopRpcProtectionVal.equals(hadoopRpcProtectionAuth)) {
+      LOG.warn("Overriding value of " + CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION + " setting it from "
+          + hadoopRpcProtectionVal + " to " + hadoopRpcProtectionAuth + " because SSL is enabled");
+      conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION, hadoopRpcProtectionAuth);
+    }
+    return HadoopThriftAuthBridge.getBridge().getHadoopSaslProperties(conf);
+  }
+
+  /**
+   * Create a new classloader that adds new elements to the classpath of an existing one.
+   *
+   * @param cloader the existing classloader; must be a URLClassLoader
+   * @param newPaths array of classpath elements to add
+   * @return a new URLClassLoader containing the existing and new classpath entries
+   */
+  public static ClassLoader addToClassPath(ClassLoader cloader, String[] newPaths) throws Exception {
+    URLClassLoader loader = (URLClassLoader) cloader;
+    // Copy the current classpath components into a mutable list
+    // (Arrays.asList returns a fixed-size list).
+    List<URL> curPath = new ArrayList<>(Arrays.asList(loader.getURLs()));
+
+    for (String onestr : newPaths) {
+      URL oneurl = urlFromPathString(onestr);
+      if (oneurl != null && !curPath.contains(oneurl)) {
+        curPath.add(oneurl);
+      }
+    }
+
+    return new URLClassLoader(curPath.toArray(new URL[0]), loader);
+  }
+
+  /**
+   * Create a URL from a string representing a path to a local file.
+   * The path string can be just a path, or can start with file:/, file:///
+   * @param onestr path string
+   * @return the URL, or null if the path cannot be parsed
+   */
+  private static URL urlFromPathString(String onestr) {
+    URL oneurl = null;
+    try {
+      if (onestr.startsWith("file:/")) {
+        oneurl = new URL(onestr);
+      } else {
+        oneurl = new File(onestr).toURI().toURL();
+      }
+    } catch (Exception err) {
+      LOG.error("Bad URL " + onestr + ", ignoring path", err);
+    }
+    return oneurl;
+  }
+
+  /**
+   * Verify whether the user is allowed to make DB notification related calls.
+   * Only the superusers defined in the Hadoop proxy user settings have the permission.
+   *
+   * @param user the short user name
+   * @param conf configuration that contains the proxy user settings
+   * @param ipAddress the IP address the request comes from
+   * @return true if the user has the permission, false otherwise
+   */
+  public static boolean checkUserHasHostProxyPrivileges(String user, Configuration conf, String ipAddress) {
+    DefaultImpersonationProvider sip = ProxyUsers.getDefaultImpersonationProvider();
+    // Just need to initialize the ProxyUsers for the first time, given that the conf will not change on the fly
+    if (sip == null) {
+      ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+      sip = ProxyUsers.getDefaultImpersonationProvider();
+    }
+    Map<String, Collection<String>> proxyHosts = sip.getProxyHosts();
+    Collection<String> hostEntries = proxyHosts.get(sip.getProxySuperuserIpConfKey(user));
+    MachineList machineList = new MachineList(hostEntries);
+    ipAddress = (ipAddress == null) ? StringUtils.EMPTY : ipAddress;
+    return machineList.includes(ipAddress);
+  }
 }
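
For reference, a minimal usage sketch of the getPvals() helper added above; it is not part of
the patch, and the partition column names and values are made-up placeholders:

  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hive.metastore.api.FieldSchema;
  import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;

  public class GetPvalsExample {
    public static void main(String[] args) {
      // Two partition columns, only one of which has a value in the partial spec
      List<FieldSchema> partCols = Arrays.asList(
          new FieldSchema("ds", "string", null),
          new FieldSchema("hr", "string", null));
      Map<String, String> partSpec = new HashMap<>();
      partSpec.put("ds", "2017-11-03");
      // Missing values are filled in with an empty string
      List<String> pvals = MetaStoreUtils.getPvals(partCols, partSpec);
      System.out.println(pvals);  // prints [2017-11-03, ]
    }
  }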

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
index b05c995..41a18cb 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
@@ -34,14 +34,28 @@ import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.zookeeper.client.ZooKeeperSaslClient;
 
 import javax.security.auth.login.AppConfigurationEntry;
+import org.apache.thrift.transport.TSSLTransportFactory;
+import org.apache.thrift.transport.TServerSocket;
+import org.apache.thrift.transport.TTransportException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.net.ssl.SSLServerSocket;
 import javax.security.auth.login.LoginException;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
 
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;
+import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
 public class SecurityUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(SecurityUtils.class);
+
   public static UserGroupInformation getUGI() throws LoginException, IOException {
     String doAs = System.getenv("HADOOP_USER_NAME");
     if (doAs != null && doAs.length() > 0) {
@@ -209,4 +223,65 @@ public class SecurityUtils {
       return tokenStoreClass;
     }
   }
+
+  /**
+   * @return the user name of the current user, honoring the HADOOP_USER_NAME environment
+   *         variable if it is set
+   * @throws IOException if the underlying Hadoop call throws LoginException
+   */
+  public static String getUser() throws IOException {
+    try {
+      UserGroupInformation ugi = getUGI();
+      return ugi.getUserName();
+    } catch (LoginException le) {
+      throw new IOException(le);
+    }
+  }
+
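+  /**
+   * Create a plain (non-SSL) Thrift server socket.
+   * @param hiveHost host to bind to; null or empty binds to the wildcard address
+   * @param portNum port to listen on
+   * @return the server socket
+   * @throws TTransportException if the socket cannot be created
+   */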
+  public static TServerSocket getServerSocket(String hiveHost, int portNum) throws TTransportException {
+    InetSocketAddress serverAddress;
+    if (hiveHost == null || hiveHost.isEmpty()) {
+      // Wildcard bind
+      serverAddress = new InetSocketAddress(portNum);
+    } else {
+      serverAddress = new InetSocketAddress(hiveHost, portNum);
+    }
+    return new TServerSocket(serverAddress);
+  }
+
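+  /**
+   * Create an SSL Thrift server socket, disabling any protocol versions found in the blacklist.
+   * @param hiveHost host to bind to; null or empty binds to the wildcard address
+   * @param portNum port to listen on
+   * @param keyStorePath path of the keystore containing the server certificate
+   * @param keyStorePassWord password of the keystore
+   * @param sslVersionBlacklist SSL/TLS protocol versions to disable, compared case-insensitively
+   * @return the SSL server socket
+   * @throws TTransportException if the socket cannot be created
+   * @throws UnknownHostException if the host cannot be resolved
+   */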
+  public static TServerSocket getServerSSLSocket(String hiveHost, int portNum, String keyStorePath,
+      String keyStorePassWord, List<String> sslVersionBlacklist)
+      throws TTransportException, UnknownHostException {
+    TSSLTransportFactory.TSSLTransportParameters params =
+        new TSSLTransportFactory.TSSLTransportParameters();
+    params.setKeyStore(keyStorePath, keyStorePassWord);
+    InetSocketAddress serverAddress;
+    if (hiveHost == null || hiveHost.isEmpty()) {
+      // Wildcard bind
+      serverAddress = new InetSocketAddress(portNum);
+    } else {
+      serverAddress = new InetSocketAddress(hiveHost, portNum);
+    }
+    TServerSocket thriftServerSocket =
+        TSSLTransportFactory.getServerSocket(portNum, 0, serverAddress.getAddress(), params);
+    if (thriftServerSocket.getServerSocket() instanceof SSLServerSocket) {
+      List<String> sslVersionBlacklistLocal = new ArrayList<>();
+      for (String sslVersion : sslVersionBlacklist) {
+        sslVersionBlacklistLocal.add(sslVersion.trim().toLowerCase());
+      }
+      SSLServerSocket sslServerSocket = (SSLServerSocket) thriftServerSocket.getServerSocket();
+      List<String> enabledProtocols = new ArrayList<>();
+      for (String protocol : sslServerSocket.getEnabledProtocols()) {
+        if (sslVersionBlacklistLocal.contains(protocol.toLowerCase())) {
+          LOG.debug("Disabling SSL Protocol: " + protocol);
+        } else {
+          enabledProtocols.add(protocol);
+        }
+      }
+      sslServerSocket.setEnabledProtocols(enabledProtocols.toArray(new String[0]));
+      LOG.info("SSL Server Socket Enabled Protocols: "
+          + Arrays.toString(sslServerSocket.getEnabledProtocols()));
+    }
+    return thriftServerSocket;
+  }
 }
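
A minimal sketch of how the new socket helpers might be used; it is not part of the patch,
and the host, port, keystore path and password below are placeholders:

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
  import org.apache.thrift.transport.TServerSocket;

  public class ServerSocketExample {
    public static void main(String[] args) throws Exception {
      // Plain socket bound to the wildcard address
      TServerSocket plain = SecurityUtils.getServerSocket(null, 9083);

      // SSL socket with older protocol versions disabled
      List<String> blacklist = Arrays.asList("SSLv2", "SSLv3", "TLSv1");
      TServerSocket ssl = SecurityUtils.getServerSSLSocket(
          "metastore.example.com", 9084, "/path/to/keystore.jks", "keystorePassword", blacklist);
    }
  }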

http://git-wip-us.apache.org/repos/asf/hive/blob/c2bbd5f4/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
new file mode 100644
index 0000000..b1cd7db
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/DummyJdoConnectionUrlHook.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;
+
+/**
+ * An implementation of JDOConnectionURLHook which simply returns CORRECT_URL when
+ * getJdoConnectionUrl is called, and ignores bad-URL notifications.
+ */
+public class DummyJdoConnectionUrlHook implements JDOConnectionURLHook {
+
+  public static final String initialUrl = "BAD_URL";
+  public static final String newUrl = "CORRECT_URL";
+
+  @Override
+  public String getJdoConnectionUrl(Configuration conf) throws Exception {
+    return newUrl;
+  }
+
+  @Override
+  public void notifyBadConnectionUrl(String url) {
+  }
+
+}
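
A minimal sketch of how this dummy hook behaves when called directly; it is not part of the
patch, and the empty Configuration is only for illustration:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.DummyJdoConnectionUrlHook;

  public class DummyHookExample {
    public static void main(String[] args) throws Exception {
      DummyJdoConnectionUrlHook hook = new DummyJdoConnectionUrlHook();
      // Always returns DummyJdoConnectionUrlHook.newUrl ("CORRECT_URL")
      System.out.println(hook.getJdoConnectionUrl(new Configuration()));
      // Bad-URL notifications are ignored
      hook.notifyBadConnectionUrl(DummyJdoConnectionUrlHook.initialUrl);
    }
  }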