Posted to commits@hive.apache.org by ga...@apache.org on 2017/08/18 23:01:03 UTC

[1/3] hive git commit: HIVE-17241 Change metastore classes to not use the shims. This closes #228. (Alan Gates, reviewed by Vaibhav Gumashta)

Repository: hive
Updated Branches:
  refs/heads/master d33555827 -> e2770d6e3


http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java
new file mode 100644
index 0000000..4abcec7
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TokenStoreDelegationTokenSecretManager.java
@@ -0,0 +1,333 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.security.token.delegation.MetastoreDelegationTokenSupport;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Extension of {@link DelegationTokenSecretManager} that supports an alternative to the default
+ * in-memory token management, enabling fail-over and clustering through a pluggable token store
+ * (ZooKeeper etc.). Delegation tokens are retrieved from the store on demand and (unlike the
+ * base class behavior) not cached in memory. This avoids complexities related to token
+ * expiration. The security token is needed only at the time the transport is opened (as opposed
+ * to per interface operation). The assumption, therefore, is that interprocess token retrieval
+ * (from a random-read-efficient store such as ZooKeeper) is cheap compared to the overhead of
+ * synchronizing per-process in-memory token caches.
+ * The wrapper incorporates the token store abstraction within the limitations of the current
+ * Hive/Hadoop dependency (.20S) with minimum code duplication.
+ * Eventually this should be supported by Hadoop security directly.
+ * (A brief usage sketch follows this file's diff.)
+ */
+public class TokenStoreDelegationTokenSecretManager extends DelegationTokenSecretManager {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(TokenStoreDelegationTokenSecretManager.class.getName());
+
+  private final long keyUpdateInterval;
+  private final long tokenRemoverScanInterval;
+  private Thread tokenRemoverThread;
+
+  private final DelegationTokenStore tokenStore;
+
+  public TokenStoreDelegationTokenSecretManager(long delegationKeyUpdateInterval,
+      long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
+      long delegationTokenRemoverScanInterval,
+      DelegationTokenStore sharedStore) {
+    super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval,
+        delegationTokenRemoverScanInterval);
+    this.keyUpdateInterval = delegationKeyUpdateInterval;
+    this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval;
+
+    this.tokenStore = sharedStore;
+  }
+
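+  /**
+   * Reload all master keys from the token store, replacing the in-memory key map,
+   * and return the freshly loaded keys.
+   */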
+  protected Map<Integer, DelegationKey> reloadKeys() {
+    // read keys from token store
+    String[] allKeys = tokenStore.getMasterKeys();
+    Map<Integer, DelegationKey> keys = new HashMap<>(allKeys.length);
+    for (String keyStr : allKeys) {
+      DelegationKey key = new DelegationKey();
+      try {
+        decodeWritable(key, keyStr);
+        keys.put(key.getKeyId(), key);
+      } catch (IOException ex) {
+        LOGGER.error("Failed to load master key.", ex);
+      }
+    }
+    synchronized (this) {
+      super.allKeys.clear();
+      super.allKeys.putAll(keys);
+    }
+    return keys;
+  }
+
+  @Override
+  public byte[] retrievePassword(DelegationTokenIdentifier identifier) throws InvalidToken {
+    DelegationTokenInformation info = this.tokenStore.getToken(identifier);
+    if (info == null) {
+      throw new InvalidToken("token expired or does not exist: " + identifier);
+    }
+    // must reuse super as info.getPassword is not accessible
+    synchronized (this) {
+      try {
+        super.currentTokens.put(identifier, info);
+        return super.retrievePassword(identifier);
+      } finally {
+        super.currentTokens.remove(identifier);
+      }
+    }
+  }
+
+  @Override
+  public DelegationTokenIdentifier cancelToken(Token<DelegationTokenIdentifier> token,
+      String canceller) throws IOException {
+    DelegationTokenIdentifier id = getTokenIdentifier(token);
+    LOGGER.info("Token cancelation requested for identifier: "+id);
+    this.tokenStore.removeToken(id);
+    return id;
+  }
+
+  /**
+   * Create the password and add it to the shared store.
+   */
+  @Override
+  protected byte[] createPassword(DelegationTokenIdentifier id) {
+    byte[] password;
+    DelegationTokenInformation info;
+    synchronized (this) {
+      password = super.createPassword(id);
+      // add new token to shared store
+      // need to persist expiration along with password
+      info = super.currentTokens.remove(id);
+      if (info == null) {
+        throw new IllegalStateException("Failed to retrieve token after creation");
+      }
+    }
+    this.tokenStore.addToken(id, info);
+    return password;
+  }
+
+  @Override
+  public long renewToken(Token<DelegationTokenIdentifier> token, String renewer) throws IOException {
+    // since renewal is Kerberos-authenticated, the token may not be cached
+    final DelegationTokenIdentifier id = getTokenIdentifier(token);
+    DelegationTokenInformation tokenInfo = this.tokenStore.getToken(id);
+    if (tokenInfo == null) {
+        throw new InvalidToken("token does not exist: " + id); // no token found
+    }
+    // ensure associated master key is available
+    if (!super.allKeys.containsKey(id.getMasterKeyId())) {
+      LOGGER.info("Unknown master key (id={}), (re)loading keys from token store.",
+        id.getMasterKeyId());
+      reloadKeys();
+    }
+    // reuse super renewal logic
+    synchronized (this) {
+      super.currentTokens.put(id, tokenInfo);
+      try {
+        return super.renewToken(token, renewer);
+      } finally {
+        super.currentTokens.remove(id);
+      }
+    }
+  }
+
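+  /** Serialize a Writable to a URL-safe Base64 string. */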
+  public static String encodeWritable(Writable key) throws IOException {
+    ByteArrayOutputStream bos = new ByteArrayOutputStream();
+    DataOutputStream dos = new DataOutputStream(bos);
+    key.write(dos);
+    dos.flush();
+    return Base64.encodeBase64URLSafeString(bos.toByteArray());
+  }
+
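+  /** Populate a Writable from a Base64 string produced by {@link #encodeWritable(Writable)}. */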
+  public static void decodeWritable(Writable w, String idStr) throws IOException {
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(Base64.decodeBase64(idStr)));
+    w.readFields(in);
+  }
+
+  /**
+   * Synchronize master key updates / sequence generation for multiple nodes.
+   * NOTE: {@link AbstractDelegationTokenSecretManager} keeps currentKey private, so we need
+   * to use this "hook" to manipulate the key through the object reference.
+   * This .20S workaround should cease to exist once Hadoop supports a token store.
+   */
+  @Override
+  protected void logUpdateMasterKey(DelegationKey key) throws IOException {
+    int keySeq = this.tokenStore.addMasterKey(encodeWritable(key));
+    // update key with assigned identifier
+    DelegationKey keyWithSeq = new DelegationKey(keySeq, key.getExpiryDate(), key.getKey());
+    String keyStr = encodeWritable(keyWithSeq);
+    this.tokenStore.updateMasterKey(keySeq, keyStr);
+    decodeWritable(key, keyStr);
+    LOGGER.info("New master key with key id={}", key.getKeyId());
+    super.logUpdateMasterKey(key);
+  }
+
+  @Override
+  public synchronized void startThreads() throws IOException {
+    try {
+      // updateCurrentKey needs to be called to initialize the master key
+      // (there should be a null check added in the future in rollMasterKey)
+      // updateCurrentKey();
+      Method m = AbstractDelegationTokenSecretManager.class.getDeclaredMethod("updateCurrentKey");
+      m.setAccessible(true);
+      m.invoke(this);
+    } catch (Exception e) {
+      throw new IOException("Failed to initialize master key", e);
+    }
+    running = true;
+    tokenRemoverThread = new Daemon(new ExpiredTokenRemover());
+    tokenRemoverThread.start();
+  }
+
+  @Override
+  public synchronized void stopThreads() {
+    if (LOGGER.isDebugEnabled()) {
+      LOGGER.debug("Stopping expired delegation token remover thread");
+    }
+    running = false;
+    if (tokenRemoverThread != null) {
+      tokenRemoverThread.interrupt();
+    }
+  }
+
+  /**
+   * Remove expired tokens. Replaces logic in {@link AbstractDelegationTokenSecretManager}
+   * that cannot be reused due to private method access. The logic here deals with an external
+   * token store more efficiently by loading only the minimum data needed into memory.
+   */
+  protected void removeExpiredTokens() {
+    long now = System.currentTimeMillis();
+    Iterator<DelegationTokenIdentifier> i = tokenStore.getAllDelegationTokenIdentifiers()
+        .iterator();
+    while (i.hasNext()) {
+      DelegationTokenIdentifier id = i.next();
+      if (now > id.getMaxDate()) {
+        this.tokenStore.removeToken(id); // no need to look at token info
+      } else {
+        // get token info to check renew date
+        DelegationTokenInformation tokenInfo = tokenStore.getToken(id);
+        if (tokenInfo != null) {
+          if (now > tokenInfo.getRenewDate()) {
+            this.tokenStore.removeToken(id);
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Extension of rollMasterKey that also removes expired keys from the store.
+   *
+   * @throws IOException if the key cannot be serialized or the store cannot be updated
+   */
+  protected void rollMasterKeyExt() throws IOException {
+    Map<Integer, DelegationKey> keys = reloadKeys();
+    int currentKeyId = super.currentId;
+    MetastoreDelegationTokenSupport.rollMasterKey(TokenStoreDelegationTokenSecretManager.this);
+    List<DelegationKey> keysAfterRoll = Arrays.asList(getAllKeys());
+    for (DelegationKey key : keysAfterRoll) {
+      keys.remove(key.getKeyId());
+      if (key.getKeyId() == currentKeyId) {
+        tokenStore.updateMasterKey(currentKeyId, encodeWritable(key));
+      }
+    }
+    for (DelegationKey expiredKey : keys.values()) {
+      LOGGER.info("Removing expired key id={}", expiredKey.getKeyId());
+      try {
+        tokenStore.removeMasterKey(expiredKey.getKeyId());
+      } catch (Exception e) {
+        LOGGER.error("Error removing expired key id={}", expiredKey.getKeyId(), e);
+      }
+    }
+  }
+
+  /**
+   * Cloned from {@link AbstractDelegationTokenSecretManager} to deal with private access
+   * restriction (there would be no need to clone the remover thread if the removal logic were
+   * protected/extensible).
+   */
+  protected class ExpiredTokenRemover extends Thread {
+    private long lastMasterKeyUpdate;
+    private long lastTokenCacheCleanup;
+
+    @Override
+    public void run() {
+      LOGGER.info("Starting expired delegation token remover thread, "
+          + "tokenRemoverScanInterval=" + tokenRemoverScanInterval
+          / (60 * 1000) + " min(s)");
+      while (running) {
+        try {
+          long now = System.currentTimeMillis();
+          if (lastMasterKeyUpdate + keyUpdateInterval < now) {
+            try {
+              rollMasterKeyExt();
+              lastMasterKeyUpdate = now;
+            } catch (IOException e) {
+              LOGGER.error("Master key updating failed. "
+                  + StringUtils.stringifyException(e));
+            }
+          }
+          if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {
+            removeExpiredTokens();
+            lastTokenCacheCleanup = now;
+          }
+          try {
+            Thread.sleep(5000); // 5 seconds
+          } catch (InterruptedException ie) {
+            LOGGER.error("InterruptedException received for ExpiredTokenRemover thread", ie);
+          }
+        } catch (Throwable t) {
+          LOGGER.error("ExpiredTokenRemover thread received unexpected exception. "
+                           + t, t);
+          // Also wait 5 seconds in case of an exception, so we do not end up busy-waiting
+          // while the underlying problem persists
+          try {
+            Thread.sleep(5000); // 5 seconds
+          } catch (InterruptedException ie) {
+            LOGGER.error("InterruptedException received for ExpiredTokenRemover thread during " +
+                "wait in exception sleep " + ie);
+          }
+        }
+      }
+    }
+  }
+
+}
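
The class javadoc above describes the design; as a brief orientation, the lifecycle looks
roughly as follows. This is an editorial sketch, not part of the commit: the intervals and the
"hive" renewer are illustrative, and in a real deployment the DelegationTokenStore is created
and initialized by HadoopThriftAuthBridge.Server rather than constructed directly.

    import org.apache.hadoop.hive.metastore.security.DelegationTokenIdentifier;
    import org.apache.hadoop.hive.metastore.security.DelegationTokenStore;
    import org.apache.hadoop.hive.metastore.security.MemoryTokenStore;
    import org.apache.hadoop.hive.metastore.security.TokenStoreDelegationTokenSecretManager;
    import org.apache.hadoop.security.token.Token;

    public class TokenManagerSketch {
      public static void main(String[] args) throws Exception {
        DelegationTokenStore store = new MemoryTokenStore();  // in-memory store from this commit
        TokenStoreDelegationTokenSecretManager mgr =
            new TokenStoreDelegationTokenSecretManager(
                60 * 60 * 1000L,            // key update interval (illustrative)
                7 * 24 * 60 * 60 * 1000L,   // token max lifetime (illustrative)
                24 * 60 * 60 * 1000L,       // token renew interval (illustrative)
                60 * 60 * 1000L,            // remover scan interval (illustrative)
                store);
        mgr.startThreads();                 // initializes the current master key
        try {
          // getDelegationToken is inherited from the base DelegationTokenSecretManager
          String tokenStr = mgr.getDelegationToken("hive");
          Token<DelegationTokenIdentifier> t = new Token<>();
          t.decodeFromUrlString(tokenStr);
          mgr.renewToken(t, "hive");        // loads token info from the store on demand
          mgr.cancelToken(t, "hive");       // removes the token from the store
        } finally {
          mgr.stopThreads();
        }
      }
    }

The test TestHadoopAuthBridge23 later in this patch exercises the same calls against a shared
store instance.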

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
new file mode 100644
index 0000000..7588c9f
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/HdfsUtils.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+
+public class HdfsUtils {
+  private static final Logger LOG = LoggerFactory.getLogger(HdfsUtils.class);
+
+  /**
+   * Check the permissions on a file.
+   * @param fs Filesystem the file is contained in
+   * @param stat Stat info for the file
+   * @param action action to be performed
+   * @throws IOException If thrown by Hadoop
+   * @throws LoginException if the current user cannot be determined
+   * @throws AccessControlException if the file cannot be accessed
+   */
+  public static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action)
+      throws IOException, LoginException {
+    checkFileAccess(fs, stat, action, SecurityUtils.getUGI());
+  }
+
+  /**
+   * Check the permissions on a file.
+   * @param fs Filesystem the file is contained in
+   * @param stat Stat info for the file
+   * @param action action to be performed
+   * @param ugi user group info for the current user.  This is passed in so that tests can pass
+   *            in mock ones.
+   * @throws IOException If thrown by Hadoop
+   * @throws AccessControlException if the file cannot be accessed
+   */
+  @VisibleForTesting
+  static void checkFileAccess(FileSystem fs, FileStatus stat, FsAction action,
+                              UserGroupInformation ugi) throws IOException {
+
+    String user = ugi.getShortUserName();
+    String[] groups = ugi.getGroupNames();
+
+    if (groups != null) {
+      String superGroupName = fs.getConf().get("dfs.permissions.supergroup", "");
+      if (arrayContains(groups, superGroupName)) {
+        LOG.debug("User \"" + user + "\" belongs to super-group \"" + superGroupName + "\". " +
+            "Permission granted for action: " + action + ".");
+        return;
+      }
+    }
+
+    FsPermission dirPerms = stat.getPermission();
+
+    if (user.equals(stat.getOwner())) {
+      if (dirPerms.getUserAction().implies(action)) {
+        return;
+      }
+    } else if (arrayContains(groups, stat.getGroup())) {
+      if (dirPerms.getGroupAction().implies(action)) {
+        return;
+      }
+    } else if (dirPerms.getOtherAction().implies(action)) {
+      return;
+    }
+    throw new AccessControlException("action " + action + " not permitted on path "
+        + stat.getPath() + " for user " + user);
+  }
+
+  public static boolean isPathEncrypted(Configuration conf, URI fsUri, Path path)
+      throws IOException {
+    Path fullPath;
+    if (path.isAbsolute()) {
+      fullPath = path;
+    } else {
+      fullPath = path.getFileSystem(conf).makeQualified(path);
+    }
+    // check the scheme of the qualified path so relative paths resolve correctly
+    if (!"hdfs".equalsIgnoreCase(fullPath.toUri().getScheme())) {
+      return false;
+    }
+    try {
+      HdfsAdmin hdfsAdmin = new HdfsAdmin(fsUri, conf);
+      return (hdfsAdmin.getEncryptionZoneForPath(fullPath) != null);
+    } catch (FileNotFoundException fnfe) {
+      LOG.debug("Failed to get encryption zone for non-existent path: " + fullPath, fnfe);
+      return false;
+    }
+  }
+
+  private static boolean arrayContains(String[] array, String value) {
+    if (array == null) return false;
+    for (String element : array) {
+      if (element.equals(value)) return true;
+    }
+    return false;
+  }
+
+}
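
A quick usage sketch (editorial, not part of the commit; the path is a hypothetical example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.hive.metastore.utils.HdfsUtils;

    public class HdfsUtilsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/some_table");      // hypothetical path
        FileStatus stat = fs.getFileStatus(p);
        // throws AccessControlException if the current user lacks READ permission
        HdfsUtils.checkFileAccess(fs, stat, FsAction.READ);
        // true only if the (qualified) path lives in an HDFS encryption zone
        boolean encrypted = HdfsUtils.isPathEncrypted(conf, fs.getUri(), p);
        System.out.println("encrypted = " + encrypted);
      }
    }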

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
new file mode 100644
index 0000000..9f0ca82
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/utils/SecurityUtils.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+
+public class SecurityUtils {
+  public static UserGroupInformation getUGI() throws LoginException, IOException {
+    String doAs = System.getenv("HADOOP_USER_NAME");
+    if (doAs != null && doAs.length() > 0) {
+      /*
+       * This allows doAs (proxy user) to be passed along across process boundaries where
+       * delegation tokens are not supported. For example, a DDL statement submitted via
+       * WebHCat with a doAs parameter forks to 'hcat', which needs to start a session that
+       * proxies the end user.
+       */
+      return UserGroupInformation.createProxyUser(doAs, UserGroupInformation.getLoginUser());
+    }
+    return UserGroupInformation.getCurrentUser();
+  }
+
+}
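
The practical effect (an editorial sketch; "alice" is a hypothetical user):

    import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
    import org.apache.hadoop.security.UserGroupInformation;

    public class GetUgiSketch {
      public static void main(String[] args) throws Exception {
        // With HADOOP_USER_NAME unset, this prints the current login user.
        // With HADOOP_USER_NAME=alice exported in the environment, it prints "alice":
        // a proxy UGI layered over the login user, so downstream permission checks
        // see the end user rather than the service principal.
        UserGroupInformation ugi = SecurityUtils.getUGI();
        System.out.println(ugi.getShortUserName());
      }
    }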

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java b/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java
new file mode 100644
index 0000000..3206681
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/security/token/delegation/MetastoreDelegationTokenSupport.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.token.delegation;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+
+/**
+ * Workaround for serialization of {@link DelegationTokenInformation} through package access.
+ * A future version of Hadoop should add this to DelegationTokenInformation itself.
+ */
+public final class MetastoreDelegationTokenSupport {
+
+  private MetastoreDelegationTokenSupport() {}
+
+  public static byte[] encodeDelegationTokenInformation(DelegationTokenInformation token) {
+    try {
+      ByteArrayOutputStream bos = new ByteArrayOutputStream();
+      DataOutputStream out = new DataOutputStream(bos);
+      WritableUtils.writeVInt(out, token.password.length);
+      out.write(token.password);
+      out.writeLong(token.renewDate);
+      out.flush();
+      return bos.toByteArray();
+    } catch (IOException ex) {
+      throw new RuntimeException("Failed to encode token.", ex);
+    }
+  }
+
+  public static DelegationTokenInformation decodeDelegationTokenInformation(byte[] tokenBytes)
+      throws IOException {
+    DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenBytes));
+    DelegationTokenInformation token = new DelegationTokenInformation(0, null);
+    int len = WritableUtils.readVInt(in);
+    token.password = new byte[len];
+    in.readFully(token.password);
+    token.renewDate = in.readLong();
+    return token;
+  }
+
+  public static void rollMasterKey(
+      AbstractDelegationTokenSecretManager<? extends AbstractDelegationTokenIdentifier> mgr)
+      throws IOException {
+    mgr.rollMasterKey();
+  }
+
+}
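
The encode/decode pair round-trips exactly the two fields that must cross the store boundary:
the password and the renew date. A minimal sketch (editorial, not part of the commit; the
values are illustrative):

    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
    import org.apache.hadoop.security.token.delegation.MetastoreDelegationTokenSupport;

    public class TokenInfoRoundTripSketch {
      public static void main(String[] args) throws Exception {
        DelegationTokenInformation info = new DelegationTokenInformation(
            System.currentTimeMillis() + 60 * 60 * 1000L,            // renew date (illustrative)
            "not-a-real-password".getBytes(StandardCharsets.UTF_8)); // password (illustrative)
        byte[] bytes = MetastoreDelegationTokenSupport.encodeDelegationTokenInformation(info);
        DelegationTokenInformation back =
            MetastoreDelegationTokenSupport.decodeDelegationTokenInformation(bytes);
        // the renew date and password both survive the round trip
        System.out.println(back.getRenewDate() == info.getRenewDate());
      }
    }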

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
new file mode 100644
index 0000000..b5f37eb
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.util.Random;
+
+public class TestHdfsUtils {
+
+  private Random rand = new Random();
+
+  private Path createFile(FileSystem fs, FsPermission perms) throws IOException {
+    StringBuilder buf = new StringBuilder();
+    for (int i = 0; i < 10; i++) {
+      buf.append((char)(rand.nextInt(26) + 'a'));
+    }
+    Path p = new Path(buf.toString());
+    FSDataOutputStream os = fs.create(p);
+    os.writeBytes("Mary had a little lamb\nit's fleece was white as snow\nand anywhere that Mary " +
+        "went\nthe lamb was sure to go\n");
+    os.close();
+    fs.setPermission(p, perms);
+    fs.deleteOnExit(p);
+    return p;
+  }
+
+  private Configuration makeConf() {
+    // Make sure that the user doesn't happen to be in the super group
+    Configuration conf = new Configuration();
+    conf.set("dfs.permissions.supergroup", "ubermensch");
+    return conf;
+  }
+
+  private UserGroupInformation ugiInvalidUserValidGroups() throws LoginException, IOException {
+    UserGroupInformation ugi = Mockito.mock(UserGroupInformation.class);
+    Mockito.when(ugi.getShortUserName()).thenReturn("nosuchuser");
+    Mockito.when(ugi.getGroupNames()).thenReturn(SecurityUtils.getUGI().getGroupNames());
+    return ugi;
+  }
+
+  private UserGroupInformation ugiInvalidUserInvalidGroups() {
+    UserGroupInformation ugi = Mockito.mock(UserGroupInformation.class);
+    Mockito.when(ugi.getShortUserName()).thenReturn("nosuchuser");
+    Mockito.when(ugi.getGroupNames()).thenReturn(new String[]{"nosuchgroup"});
+    return ugi;
+  }
+
+  @Test
+  public void userReadWriteExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
+    UserGroupInformation ugi = SecurityUtils.getUGI();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void userNoRead() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL));
+    UserGroupInformation ugi = SecurityUtils.getUGI();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void userNoWrite() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL));
+    UserGroupInformation ugi = SecurityUtils.getUGI();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void userNoExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.ALL));
+    UserGroupInformation ugi = SecurityUtils.getUGI();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test
+  public void groupReadWriteExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.ALL, FsAction.NONE));
+    UserGroupInformation ugi = ugiInvalidUserValidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void groupNoRead() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL));
+    UserGroupInformation ugi = ugiInvalidUserValidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void groupNoWrite() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL));
+    UserGroupInformation ugi = ugiInvalidUserValidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void groupNoExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.ALL));
+    UserGroupInformation ugi = ugiInvalidUserValidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test
+  public void otherReadWriteExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.ALL));
+    UserGroupInformation ugi = ugiInvalidUserInvalidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void otherNoRead() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE));
+    UserGroupInformation ugi = ugiInvalidUserInvalidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void otherNoWrite() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE));
+    UserGroupInformation ugi = ugiInvalidUserInvalidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+  }
+
+  @Test(expected = AccessControlException.class)
+  public void otherNoExecute() throws IOException, LoginException {
+    FileSystem fs = FileSystem.get(makeConf());
+    Path p = createFile(fs, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.NONE));
+    UserGroupInformation ugi = ugiInvalidUserInvalidGroups();
+    HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+  }
+
+  @Test
+  public void rootReadWriteExecute() throws IOException, LoginException {
+    UserGroupInformation ugi = SecurityUtils.getUGI();
+    FileSystem fs = FileSystem.get(new Configuration());
+    String old = fs.getConf().get("dfs.permissions.supergroup");
+    try {
+      fs.getConf().set("dfs.permissions.supergroup", ugi.getPrimaryGroupName());
+      Path p = createFile(fs, new FsPermission(FsAction.NONE, FsAction.NONE, FsAction.NONE));
+      HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.READ, ugi);
+      HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.WRITE, ugi);
+      HdfsUtils.checkFileAccess(fs, fs.getFileStatus(p), FsAction.EXECUTE, ugi);
+    } finally {
+      if (old != null) {
+        fs.getConf().set("dfs.permissions.supergroup", old);
+      }
+    }
+  }
+
+}


[3/3] hive git commit: HIVE-17241 Change metastore classes to not use the shims. This closes #228. (Alan Gates, reviewed by Vaibhav Gumashta)

Posted by ga...@apache.org.
HIVE-17241 Change metastore classes to not use the shims.  This closes #228.  (Alan Gates, reviewed by Vaibhav Gumashta)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e2770d6e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e2770d6e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e2770d6e

Branch: refs/heads/master
Commit: e2770d6e3818d6becf3a4227d5a2f250a75c813d
Parents: d335558
Author: Alan Gates <ga...@hortonworks.com>
Authored: Fri Aug 18 15:56:00 2017 -0700
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Fri Aug 18 15:56:00 2017 -0700

----------------------------------------------------------------------
 .../apache/hive/hcatalog/cli/TestPermsGrp.java  |   4 +-
 .../mapreduce/TestHCatPartitionPublish.java     |   6 +-
 .../security/TestHadoopAuthBridge23.java        | 416 +++++++++++
 .../hive/thrift/TestHadoopAuthBridge23.java     | 417 -----------
 .../AbstractTestAuthorizationApiAuthorizer.java |   4 +-
 .../hadoop/hive/metastore/TestFilterHooks.java  |   4 +-
 ...TestHiveMetaStoreWithEnvironmentContext.java |   4 +-
 .../metastore/TestMetaStoreAuthorization.java   |   4 +-
 .../TestMetaStoreEndFunctionListener.java       |   5 +-
 .../metastore/TestMetaStoreEventListener.java   |   4 +-
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   4 +-
 .../metastore/TestMetaStoreInitListener.java    |   5 +-
 .../metastore/TestMetaStoreListenersError.java  |   6 +-
 .../hive/metastore/TestMetaStoreMetrics.java    |   4 +-
 .../hive/metastore/TestRemoteHiveMetaStore.java |   4 +-
 .../TestRemoteHiveMetaStoreIpAddress.java       |   4 +-
 .../hive/metastore/TestRetryingHMSHandler.java  |   4 +-
 ...estDDLWithRemoteMetastoreSecondNamenode.java |   4 +-
 .../security/StorageBasedMetastoreTestBase.java |   5 +-
 .../TestAuthorizationPreEventListener.java      |   4 +-
 .../TestClientSideAuthorizationProvider.java    |   4 +-
 .../TestMetastoreAuthorizationProvider.java     |   4 +-
 .../TestMultiAuthorizationPreEventListener.java |   4 +-
 .../org/apache/hive/jdbc/miniHS2/MiniHS2.java   |   4 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |  27 +-
 .../hive/metastore/HiveMetaStoreClient.java     |  12 +-
 .../hadoop/hive/metastore/MetaStoreUtils.java   |   8 +-
 .../hadoop/hive/metastore/ObjectStore.java      |   5 +-
 .../hive/metastore/TUGIBasedProcessor.java      |   2 +-
 .../apache/hadoop/hive/metastore/Warehouse.java |   4 +-
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   5 +-
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   5 +-
 standalone-metastore/pom.xml                    |  16 +
 .../hive/metastore/conf/MetastoreConf.java      |  12 +
 .../security/DelegationTokenIdentifier.java     |  52 ++
 .../security/DelegationTokenSecretManager.java  | 125 ++++
 .../security/DelegationTokenStore.java          | 118 ++++
 .../security/HadoopThriftAuthBridge.java        | 684 +++++++++++++++++++
 .../security/HadoopThriftAuthBridge23.java      | 114 ++++
 .../metastore/security/MemoryTokenStore.java    | 136 ++++
 .../MetastoreDelegationTokenManager.java        | 172 +++++
 .../metastore/security/TFilterTransport.java    |  99 +++
 .../security/TUGIAssumingTransport.java         |  73 ++
 .../security/TUGIContainingTransport.java       |  96 +++
 .../TokenStoreDelegationTokenSecretManager.java | 333 +++++++++
 .../hadoop/hive/metastore/utils/HdfsUtils.java  | 125 ++++
 .../hive/metastore/utils/SecurityUtils.java     |  40 ++
 .../MetastoreDelegationTokenSupport.java        |  68 ++
 .../hive/metastore/utils/TestHdfsUtils.java     | 193 ++++++
 49 files changed, 2949 insertions(+), 503 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
index 66a5dd4..e863372 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java
@@ -44,12 +44,12 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hive.hcatalog.ExitException;
 import org.apache.hive.hcatalog.NoExitSecurityManager;
 import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
@@ -81,7 +81,7 @@ public class TestPermsGrp extends TestCase {
 
 
     msPort = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(msPort, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(msPort, HadoopThriftAuthBridge.getBridge());
 
     isServerRunning = true;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
----------------------------------------------------------------------
diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
index 69874bc..358dd50 100644
--- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
+++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java
@@ -41,11 +41,11 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileOutputFormat;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
@@ -54,7 +54,6 @@ import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Mapper;
 import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
-import org.apache.hadoop.util.Shell;
 import org.apache.hive.hcatalog.NoExitSecurityManager;
 import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer;
 import org.apache.hive.hcatalog.data.DefaultHCatRecord;
@@ -106,8 +105,7 @@ public class TestHCatPartitionPublish {
 
     msPort = MetaStoreUtils.findFreePort();
 
-    MetaStoreUtils.startMetaStore(msPort, ShimLoader
-        .getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(msPort, HadoopThriftAuthBridge.getBridge());
     Thread.sleep(10000);
     isServerRunning = true;
     securityManager = System.getSecurityManager();

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java
new file mode 100644
index 0000000..ef36040
--- /dev/null
+++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/metastore/security/TestHadoopAuthBridge23.java
@@ -0,0 +1,416 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStore;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+import org.apache.hadoop.security.token.delegation.DelegationKey;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.thrift.transport.TSaslServerTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.apache.thrift.transport.TTransportFactory;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.ServerSocket;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
+
+public class TestHadoopAuthBridge23 {
+
+  /**
+   * Set to true once the metastore has initialized its token manager
+   * through a call to HadoopThriftAuthBridge23.Server.startDelegationTokenSecretManager.
+   */
+  static volatile boolean isMetastoreTokenManagerInited;
+
+  public static class MyTokenStore extends MemoryTokenStore {
+    static volatile DelegationTokenStore TOKEN_STORE = null;
+
+    @Override
+    public void init(Object hmsHandler, HadoopThriftAuthBridge.Server.ServerMode smode)
+        throws TokenStoreException {
+      super.init(hmsHandler, smode);
+      TOKEN_STORE = this;
+      try {
+        Thread.sleep(5000);
+      } catch (InterruptedException e) {
+        e.printStackTrace();
+      }
+      isMetastoreTokenManagerInited = true;
+    }
+  }
+
+  private static class MyHadoopThriftAuthBridge23 extends HadoopThriftAuthBridge23 {
+    @Override
+    public Server createServer(String keytabFile, String principalConf)
+        throws TTransportException {
+      //Create a Server that doesn't interpret any Kerberos stuff
+      return new Server();
+    }
+
+    static class Server extends HadoopThriftAuthBridge.Server {
+      public Server() throws TTransportException {
+        super();
+      }
+      @Override
+      public TTransportFactory createTransportFactory(Map<String, String> saslProps)
+          throws TTransportException {
+        TSaslServerTransport.Factory transFactory =
+          new TSaslServerTransport.Factory();
+        transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(),
+            null, SaslRpcServer.SASL_DEFAULT_REALM,
+            saslProps,
+            new SaslDigestCallbackHandler(secretManager));
+
+        return new TUGIAssumingTransportFactory(transFactory, realUgi);
+      }
+    }
+  }
+
+  private HiveConf conf;
+
+  private void configureSuperUserIPAddresses(Configuration conf,
+      String superUserShortName) throws IOException {
+    List<String> ipList = new ArrayList<String>();
+    Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
+        .getNetworkInterfaces();
+    while (netInterfaceList.hasMoreElements()) {
+      NetworkInterface inf = netInterfaceList.nextElement();
+      Enumeration<InetAddress> addrList = inf.getInetAddresses();
+      while (addrList.hasMoreElements()) {
+        InetAddress addr = addrList.nextElement();
+        ipList.add(addr.getHostAddress());
+      }
+    }
+    StringBuilder builder = new StringBuilder();
+    for (String ip : ipList) {
+      builder.append(ip);
+      builder.append(',');
+    }
+    builder.append("127.0.1.1,");
+    builder.append(InetAddress.getLocalHost().getCanonicalHostName());
+    conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(superUserShortName),
+        builder.toString());
+  }
+
+  @Before
+  public void setup() throws Exception {
+    isMetastoreTokenManagerInited = false;
+    int port = findFreePort();
+    System.setProperty(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname,
+        "true");
+    System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname,
+        "thrift://localhost:" + port);
+    System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(
+        System.getProperty("test.build.data", "/tmp")).toString());
+    System.setProperty(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS.varname,
+        MyTokenStore.class.getName());
+    conf = new HiveConf(TestHadoopAuthBridge23.class);
+    MetaStoreUtils.startMetaStore(port, new MyHadoopThriftAuthBridge23());
+  }
+
+  /**
+   * Test delegation token store/load from shared store.
+   * @throws Exception
+   */
+  @Test
+  public void testDelegationTokenSharedStore() throws Exception {
+    UserGroupInformation clientUgi = UserGroupInformation.getCurrentUser();
+
+    TokenStoreDelegationTokenSecretManager tokenManager =
+        new TokenStoreDelegationTokenSecretManager(0, 60*60*1000, 60*60*1000, 0,
+            MyTokenStore.TOKEN_STORE);
+    // initializes current key
+    tokenManager.startThreads();
+    tokenManager.stopThreads();
+
+    String tokenStrForm = tokenManager.getDelegationToken(clientUgi.getShortUserName());
+    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
+    t.decodeFromUrlString(tokenStrForm);
+
+    //check whether the username in the token is what we expect
+    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
+    d.readFields(new DataInputStream(new ByteArrayInputStream(
+        t.getIdentifier())));
+    Assert.assertTrue("Usernames don't match",
+        clientUgi.getShortUserName().equals(d.getUser().getShortUserName()));
+
+    DelegationTokenInformation tokenInfo = MyTokenStore.TOKEN_STORE
+        .getToken(d);
+    Assert.assertNotNull("token not in store", tokenInfo);
+    Assert.assertFalse("duplicate token add",
+        MyTokenStore.TOKEN_STORE.addToken(d, tokenInfo));
+
+    // check keys are copied from token store when token is loaded
+    TokenStoreDelegationTokenSecretManager anotherManager =
+        new TokenStoreDelegationTokenSecretManager(0, 0, 0, 0,
+            MyTokenStore.TOKEN_STORE);
+    Assert.assertEquals("master keys empty on init", 0,
+        anotherManager.getAllKeys().length);
+    Assert.assertNotNull("token loaded",
+        anotherManager.retrievePassword(d));
+    anotherManager.renewToken(t, clientUgi.getShortUserName());
+    Assert.assertEquals("master keys not loaded from store",
+        MyTokenStore.TOKEN_STORE.getMasterKeys().length,
+        anotherManager.getAllKeys().length);
+
+    // cancel the delegation token
+    tokenManager.cancelDelegationToken(tokenStrForm);
+    Assert.assertNull("token not removed from store after cancel",
+        MyTokenStore.TOKEN_STORE.getToken(d));
+    Assert.assertFalse("token removed (again)",
+        MyTokenStore.TOKEN_STORE.removeToken(d));
+    try {
+      anotherManager.retrievePassword(d);
+      Assert.fail("InvalidToken expected after cancel");
+    } catch (InvalidToken ex) {
+      // expected
+    }
+
+    // token expiration
+    MyTokenStore.TOKEN_STORE.addToken(d,
+        new DelegationTokenInformation(0, t.getPassword()));
+    Assert.assertNotNull(MyTokenStore.TOKEN_STORE.getToken(d));
+    anotherManager.removeExpiredTokens();
+    Assert.assertNull("Expired token not removed",
+        MyTokenStore.TOKEN_STORE.getToken(d));
+
+    // key expiration - create an already expired key
+    anotherManager.startThreads(); // generates initial key
+    anotherManager.stopThreads();
+    DelegationKey expiredKey = new DelegationKey(-1, 0, anotherManager.getAllKeys()[0].getKey());
+    anotherManager.logUpdateMasterKey(expiredKey); // updates key with sequence number
+    Assert.assertTrue("expired key not in allKeys",
+        anotherManager.reloadKeys().containsKey(expiredKey.getKeyId()));
+    anotherManager.rollMasterKeyExt();
+    Assert.assertFalse("Expired key not removed",
+        anotherManager.reloadKeys().containsKey(expiredKey.getKeyId()));
+  }
+
+  @Test
+  public void testSaslWithHiveMetaStore() throws Exception {
+    setup();
+    UserGroupInformation clientUgi = UserGroupInformation.getCurrentUser();
+    obtainTokenAndAddIntoUGI(clientUgi, null);
+    obtainTokenAndAddIntoUGI(clientUgi, "tokenForFooTablePartition");
+  }
+
+  @Test
+  public void testMetastoreProxyUser() throws Exception {
+    setup();
+
+    final String proxyUserName = "proxyUser";
+    //set the configuration up such that proxyUser can act on
+    //behalf of all users belonging to the group foo_bar_group
+    //(a dummy group)
+    String[] groupNames =
+      new String[] { "foo_bar_group" };
+    setGroupsInConf(groupNames, proxyUserName);
+
+    final UserGroupInformation delegationTokenUser =
+      UserGroupInformation.getCurrentUser();
+
+    final UserGroupInformation proxyUserUgi =
+      UserGroupInformation.createRemoteUser(proxyUserName);
+    String tokenStrForm = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
+      public String run() throws Exception {
+        try {
+          //Since the user running the test won't belong to a non-existent group
+          //foo_bar_group, the call to getDelegationTokenStr will fail
+          return getDelegationTokenStr(delegationTokenUser, proxyUserUgi);
+        } catch (AuthorizationException ae) {
+          return null;
+        }
+      }
+    });
+    Assert.assertTrue("Expected the getDelegationToken call to fail",
+        tokenStrForm == null);
+
+    //set the configuration up such that proxyUser can act on
+    //behalf of all users belonging to the real group(s) that the
+    //user running the test belongs to
+    setGroupsInConf(UserGroupInformation.getCurrentUser().getGroupNames(),
+        proxyUserName);
+    tokenStrForm = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
+      public String run() throws Exception {
+        try {
+          //Since the user running the test belongs to the group
+          //obtained above the call to getDelegationTokenStr will succeed
+          return getDelegationTokenStr(delegationTokenUser, proxyUserUgi);
+        } catch (AuthorizationException ae) {
+          return null;
+        }
+      }
+    });
+    Assert.assertTrue("Expected the getDelegationToken call to not fail",
+        tokenStrForm != null);
+    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
+    t.decodeFromUrlString(tokenStrForm);
+    //check whether the username in the token is what we expect
+    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
+    d.readFields(new DataInputStream(new ByteArrayInputStream(
+        t.getIdentifier())));
+    Assert.assertTrue("Usernames don't match",
+        delegationTokenUser.getShortUserName().equals(d.getUser().getShortUserName()));
+
+  }
+
+  private void setGroupsInConf(String[] groupNames, String proxyUserName)
+      throws IOException {
+    conf.set(
+        DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(proxyUserName),
+        StringUtils.join(",", Arrays.asList(groupNames)));
+    configureSuperUserIPAddresses(conf, proxyUserName);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
+  }
+
+  private String getDelegationTokenStr(UserGroupInformation ownerUgi,
+      UserGroupInformation realUgi) throws Exception {
+    //obtain a token by directly invoking the metastore operation (without going
+    //through the thrift interface). Obtaining a token makes the secret manager
+    //aware of the user and that it gave the token to the user
+    //also set the authentication method explicitly to KERBEROS. Since the
+    //metastore checks whether the authentication method is KERBEROS or not
+    //for getDelegationToken, and the testcases don't use
+    //kerberos, this needs to be done
+
+    waitForMetastoreTokenInit();
+
+    HadoopThriftAuthBridge.Server.authenticationMethod
+        .set(AuthenticationMethod.KERBEROS);
+    return HiveMetaStore.getDelegationToken(ownerUgi.getShortUserName(),
+        realUgi.getShortUserName(), InetAddress.getLocalHost().getHostAddress());
+  }
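A hedged illustration of the gate the comment above refers to; this is the assumed shape of the check, not the literal HiveMetaStore source.

    // Illustration only (assumed shape, not HiveMetaStore's actual code):
    // getDelegationToken is refused unless the thread-local authentication
    // method says the caller came in over kerberos.
    if (HadoopThriftAuthBridge.Server.authenticationMethod.get()
        != AuthenticationMethod.KERBEROS) {
      throw new MetaException("getDelegationToken requires kerberos authentication");
    }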
+
+  /**
+   * Wait for the metastore to have initialized its token manager.
+   * This does not have to be done in other metastore test cases, as they
+   * use the metastore client, which retries a few times on failure.
+   * @throws InterruptedException
+   */
+  private void waitForMetastoreTokenInit() throws InterruptedException {
+    int waitAttempts = 30;
+    while (waitAttempts > 0 && !isMetastoreTokenManagerInited) {
+      Thread.sleep(1000);
+      waitAttempts--;
+    }
+  }
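If this wait ever times out, the subsequent calls fail with unrelated-looking errors. A fail-fast variant (a sketch; the assertion message is invented) would surface the timeout directly:

    private void waitForMetastoreTokenInit() throws InterruptedException {
      // Same 30-second budget as above, but assert on exit so a timeout
      // produces a clear failure instead of a confusing downstream error.
      for (int attempt = 0; attempt < 30 && !isMetastoreTokenManagerInited; attempt++) {
        Thread.sleep(1000);
      }
      Assert.assertTrue("Metastore token manager not initialized within 30s",
          isMetastoreTokenManagerInited);
    }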
+
+  private void obtainTokenAndAddIntoUGI(UserGroupInformation clientUgi,
+      String tokenSig) throws Exception {
+    String tokenStrForm = getDelegationTokenStr(clientUgi, clientUgi);
+    Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
+    t.decodeFromUrlString(tokenStrForm);
+
+    //check whether the username in the token is what we expect
+    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
+    d.readFields(new DataInputStream(new ByteArrayInputStream(
+        t.getIdentifier())));
+    Assert.assertTrue("Usernames don't match",
+        clientUgi.getShortUserName().equals(d.getUser().getShortUserName()));
+
+    if (tokenSig != null) {
+      conf.setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, tokenSig);
+      t.setService(new Text(tokenSig));
+    }
+    //add the token to the clientUgi for securely talking to the metastore
+    clientUgi.addToken(t);
+    //Create the metastore client as the clientUgi. Doing so gives the
+    //client access to the token that was added to the clientUgi earlier.
+    HiveMetaStoreClient hiveClient =
+      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
+        public HiveMetaStoreClient run() throws Exception {
+          return new HiveMetaStoreClient(conf);
+        }
+      });
+
+    Assert.assertTrue("Couldn't connect to metastore", hiveClient != null);
+
+    //try out some metastore operations
+    createDBAndVerifyExistence(hiveClient);
+
+    hiveClient.close();
+
+    //Now cancel the delegation token
+    HiveMetaStore.cancelDelegationToken(tokenStrForm);
+
+    //now metastore connection should fail
+    hiveClient =
+      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
+        public HiveMetaStoreClient run() {
+          try {
+            return new HiveMetaStoreClient(conf);
+          } catch (MetaException e) {
+            return null;
+          }
+        }
+      });
+    Assert.assertTrue("Expected metastore operations to fail", hiveClient == null);
+  }
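The tokenSig branch above relies on service-based token selection: when METASTORE_TOKEN_SIGNATURE is set, the client picks the delegation token out of the UGI by its service text. A minimal sketch of that lookup, using the service name this test passes in:

    // Find the delegation token registered by addToken() via its service
    // text -- the same matching the tokenSig branch depends on.
    Text service = new Text("tokenForFooTablePartition");
    Token<?> selected = null;
    for (Token<?> tok : clientUgi.getTokens()) {
      if (service.equals(tok.getService())) {
        selected = tok;
        break;
      }
    }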
+
+  private void createDBAndVerifyExistence(HiveMetaStoreClient client)
+      throws Exception {
+    String dbName = "simpdb";
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+    Database db1 = client.getDatabase(dbName);
+    client.dropDatabase(dbName);
+    Assert.assertTrue("Databases do not match", db1.getName().equals(db.getName()));
+  }
+
+  private int findFreePort() throws IOException {
+    // try-with-resources ensures the probe socket is closed even if
+    // getLocalPort() were to throw
+    try (ServerSocket socket = new ServerSocket(0)) {
+      return socket.getLocalPort();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
deleted file mode 100644
index 8656fff..0000000
--- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/thrift/TestHadoopAuthBridge23.java
+++ /dev/null
@@ -1,417 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
- 
-package org.apache.hadoop.hive.thrift;
-
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStore;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.authorize.AuthorizationException;
-import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
-import org.apache.hadoop.security.token.delegation.DelegationKey;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.thrift.transport.TSaslServerTransport;
-import org.apache.thrift.transport.TTransportException;
-import org.apache.thrift.transport.TTransportFactory;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.net.ServerSocket;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Map;
-
-public class TestHadoopAuthBridge23 {
-
-  /**
-   * set to true when metastore token manager has intitialized token manager
-   * through call to HadoopThriftAuthBridge23.Server.startDelegationTokenSecretManager
-   */
-  static volatile boolean isMetastoreTokenManagerInited;
-
-  public static class MyTokenStore extends MemoryTokenStore {
-    static volatile DelegationTokenStore TOKEN_STORE = null;
-    public void init(Object hmsHandler, ServerMode smode) throws TokenStoreException {
-      super.init(hmsHandler, smode);
-      TOKEN_STORE = this;
-      try {
-        Thread.sleep(5000);
-      } catch (InterruptedException e) {
-        e.printStackTrace();
-      }
-      isMetastoreTokenManagerInited = true;
-    }
-  }
-
-  private static class MyHadoopThriftAuthBridge23 extends HadoopThriftAuthBridge23 {
-    @Override
-    public Server createServer(String keytabFile, String principalConf)
-    throws TTransportException {
-      //Create a Server that doesn't interpret any Kerberos stuff
-      return new Server();
-    }
-
-    static class Server extends HadoopThriftAuthBridge.Server {
-      public Server() throws TTransportException {
-        super();
-      }
-      @Override
-      public TTransportFactory createTransportFactory(Map<String, String> saslProps)
-      throws TTransportException {
-        TSaslServerTransport.Factory transFactory =
-          new TSaslServerTransport.Factory();
-        transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(),
-            null, SaslRpcServer.SASL_DEFAULT_REALM,
-            saslProps,
-            new SaslDigestCallbackHandler(secretManager));
-
-        return new TUGIAssumingTransportFactory(transFactory, realUgi);
-      }
-
-
-    }
-  }
-
-
-  private HiveConf conf;
-
-  private void configureSuperUserIPAddresses(Configuration conf,
-      String superUserShortName) throws IOException {
-    List<String> ipList = new ArrayList<String>();
-    Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
-        .getNetworkInterfaces();
-    while (netInterfaceList.hasMoreElements()) {
-      NetworkInterface inf = netInterfaceList.nextElement();
-      Enumeration<InetAddress> addrList = inf.getInetAddresses();
-      while (addrList.hasMoreElements()) {
-        InetAddress addr = addrList.nextElement();
-        ipList.add(addr.getHostAddress());
-      }
-    }
-    StringBuilder builder = new StringBuilder();
-    for (String ip : ipList) {
-      builder.append(ip);
-      builder.append(',');
-    }
-    builder.append("127.0.1.1,");
-    builder.append(InetAddress.getLocalHost().getCanonicalHostName());
-    conf.setStrings(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(superUserShortName),
-        builder.toString());
-  }
-  @Before
-  public void setup() throws Exception {
-    isMetastoreTokenManagerInited = false;
-    int port = findFreePort();
-    System.setProperty(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname,
-        "true");
-    System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname,
-        "thrift://localhost:" + port);
-    System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, new Path(
-        System.getProperty("test.build.data", "/tmp")).toString());
-    System.setProperty(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS.varname,
-        MyTokenStore.class.getName());
-    conf = new HiveConf(TestHadoopAuthBridge23.class);
-    MetaStoreUtils.startMetaStore(port, new MyHadoopThriftAuthBridge23());
-  }
-
-  /**
-   * Test delegation token store/load from shared store.
-   * @throws Exception
-   */
-  @Test
-  public void testDelegationTokenSharedStore() throws Exception {
-    UserGroupInformation clientUgi = UserGroupInformation.getCurrentUser();
-
-    TokenStoreDelegationTokenSecretManager tokenManager =
-        new TokenStoreDelegationTokenSecretManager(0, 60*60*1000, 60*60*1000, 0,
-            MyTokenStore.TOKEN_STORE);
-    // initializes current key
-    tokenManager.startThreads();
-    tokenManager.stopThreads();
-
-    String tokenStrForm = tokenManager.getDelegationToken(clientUgi.getShortUserName());
-    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
-    t.decodeFromUrlString(tokenStrForm);
-
-    //check whether the username in the token is what we expect
-    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
-    d.readFields(new DataInputStream(new ByteArrayInputStream(
-        t.getIdentifier())));
-    Assert.assertTrue("Usernames don't match",
-        clientUgi.getShortUserName().equals(d.getUser().getShortUserName()));
-
-    DelegationTokenInformation tokenInfo = MyTokenStore.TOKEN_STORE
-        .getToken(d);
-    Assert.assertNotNull("token not in store", tokenInfo);
-    Assert.assertFalse("duplicate token add",
-        MyTokenStore.TOKEN_STORE.addToken(d, tokenInfo));
-
-    // check keys are copied from token store when token is loaded
-    TokenStoreDelegationTokenSecretManager anotherManager =
-        new TokenStoreDelegationTokenSecretManager(0, 0, 0, 0,
-            MyTokenStore.TOKEN_STORE);
-    Assert.assertEquals("master keys empty on init", 0,
-        anotherManager.getAllKeys().length);
-    Assert.assertNotNull("token loaded",
-        anotherManager.retrievePassword(d));
-    anotherManager.renewToken(t, clientUgi.getShortUserName());
-    Assert.assertEquals("master keys not loaded from store",
-        MyTokenStore.TOKEN_STORE.getMasterKeys().length,
-        anotherManager.getAllKeys().length);
-
-    // cancel the delegation token
-    tokenManager.cancelDelegationToken(tokenStrForm);
-    Assert.assertNull("token not removed from store after cancel",
-        MyTokenStore.TOKEN_STORE.getToken(d));
-    Assert.assertFalse("token removed (again)",
-        MyTokenStore.TOKEN_STORE.removeToken(d));
-    try {
-      anotherManager.retrievePassword(d);
-      Assert.fail("InvalidToken expected after cancel");
-    } catch (InvalidToken ex) {
-      // expected
-    }
-
-    // token expiration
-    MyTokenStore.TOKEN_STORE.addToken(d,
-        new DelegationTokenInformation(0, t.getPassword()));
-    Assert.assertNotNull(MyTokenStore.TOKEN_STORE.getToken(d));
-    anotherManager.removeExpiredTokens();
-    Assert.assertNull("Expired token not removed",
-        MyTokenStore.TOKEN_STORE.getToken(d));
-
-    // key expiration - create an already expired key
-    anotherManager.startThreads(); // generates initial key
-    anotherManager.stopThreads();
-    DelegationKey expiredKey = new DelegationKey(-1, 0, anotherManager.getAllKeys()[0].getKey());
-    anotherManager.logUpdateMasterKey(expiredKey); // updates key with sequence number
-    Assert.assertTrue("expired key not in allKeys",
-        anotherManager.reloadKeys().containsKey(expiredKey.getKeyId()));
-    anotherManager.rollMasterKeyExt();
-    Assert.assertFalse("Expired key not removed",
-        anotherManager.reloadKeys().containsKey(expiredKey.getKeyId()));
-  }
-
-  @Test
-  public void testSaslWithHiveMetaStore() throws Exception {
-    setup();
-    UserGroupInformation clientUgi = UserGroupInformation.getCurrentUser();
-    obtainTokenAndAddIntoUGI(clientUgi, null);
-    obtainTokenAndAddIntoUGI(clientUgi, "tokenForFooTablePartition");
-  }
-
-  @Test
-  public void testMetastoreProxyUser() throws Exception {
-    setup();
-
-    final String proxyUserName = "proxyUser";
-    //set the configuration up such that proxyUser can act on
-    //behalf of all users belonging to the group foo_bar_group (
-    //a dummy group)
-    String[] groupNames =
-      new String[] { "foo_bar_group" };
-    setGroupsInConf(groupNames, proxyUserName);
-
-    final UserGroupInformation delegationTokenUser =
-      UserGroupInformation.getCurrentUser();
-
-    final UserGroupInformation proxyUserUgi =
-      UserGroupInformation.createRemoteUser(proxyUserName);
-    String tokenStrForm = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
-      public String run() throws Exception {
-        try {
-          //Since the user running the test won't belong to a non-existent group
-          //foo_bar_group, the call to getDelegationTokenStr will fail
-          return getDelegationTokenStr(delegationTokenUser, proxyUserUgi);
-        } catch (AuthorizationException ae) {
-          return null;
-        }
-      }
-    });
-    Assert.assertTrue("Expected the getDelegationToken call to fail",
-        tokenStrForm == null);
-
-    //set the configuration up such that proxyUser can act on
-    //behalf of all users belonging to the real group(s) that the
-    //user running the test belongs to
-    setGroupsInConf(UserGroupInformation.getCurrentUser().getGroupNames(),
-        proxyUserName);
-    tokenStrForm = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
-      public String run() throws Exception {
-        try {
-          //Since the user running the test belongs to the group
-          //obtained above the call to getDelegationTokenStr will succeed
-          return getDelegationTokenStr(delegationTokenUser, proxyUserUgi);
-        } catch (AuthorizationException ae) {
-          return null;
-        }
-      }
-    });
-    Assert.assertTrue("Expected the getDelegationToken call to not fail",
-        tokenStrForm != null);
-    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
-    t.decodeFromUrlString(tokenStrForm);
-    //check whether the username in the token is what we expect
-    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
-    d.readFields(new DataInputStream(new ByteArrayInputStream(
-        t.getIdentifier())));
-    Assert.assertTrue("Usernames don't match",
-        delegationTokenUser.getShortUserName().equals(d.getUser().getShortUserName()));
-
-  }
-
-  private void setGroupsInConf(String[] groupNames, String proxyUserName)
-  throws IOException {
-   conf.set(
-      DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(proxyUserName),
-      StringUtils.join(",", Arrays.asList(groupNames)));
-    configureSuperUserIPAddresses(conf, proxyUserName);
-    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
-  }
-
-  private String getDelegationTokenStr(UserGroupInformation ownerUgi,
-      UserGroupInformation realUgi) throws Exception {
-    //obtain a token by directly invoking the metastore operation(without going
-    //through the thrift interface). Obtaining a token makes the secret manager
-    //aware of the user and that it gave the token to the user
-    //also set the authentication method explicitly to KERBEROS. Since the
-    //metastore checks whether the authentication method is KERBEROS or not
-    //for getDelegationToken, and the testcases don't use
-    //kerberos, this needs to be done
-
-    waitForMetastoreTokenInit();
-
-    HadoopThriftAuthBridge.Server.authenticationMethod
-                             .set(AuthenticationMethod.KERBEROS);
-    return
-        HiveMetaStore.getDelegationToken(ownerUgi.getShortUserName(),
-            realUgi.getShortUserName(), InetAddress.getLocalHost().getHostAddress());
-  }
-
-  /**
-   * Wait for metastore to have initialized token manager
-   * This does not have to be done in other metastore test cases as they
-   * use metastore client which will retry few times on failure
-   * @throws InterruptedException
-   */
-  private void waitForMetastoreTokenInit() throws InterruptedException {
-    int waitAttempts = 30;
-    while(waitAttempts > 0 && !isMetastoreTokenManagerInited){
-      Thread.sleep(1000);
-      waitAttempts--;
-    }
-  }
-
-  private void obtainTokenAndAddIntoUGI(UserGroupInformation clientUgi,
-      String tokenSig) throws Exception {
-    String tokenStrForm = getDelegationTokenStr(clientUgi, clientUgi);
-    Token<DelegationTokenIdentifier> t= new Token<DelegationTokenIdentifier>();
-    t.decodeFromUrlString(tokenStrForm);
-
-    //check whether the username in the token is what we expect
-    DelegationTokenIdentifier d = new DelegationTokenIdentifier();
-    d.readFields(new DataInputStream(new ByteArrayInputStream(
-        t.getIdentifier())));
-    Assert.assertTrue("Usernames don't match",
-        clientUgi.getShortUserName().equals(d.getUser().getShortUserName()));
-
-    if (tokenSig != null) {
-      conf.setVar(HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, tokenSig);
-      t.setService(new Text(tokenSig));
-    }
-    //add the token to the clientUgi for securely talking to the metastore
-    clientUgi.addToken(t);
-    //Create the metastore client as the clientUgi. Doing so this
-    //way will give the client access to the token that was added earlier
-    //in the clientUgi
-    HiveMetaStoreClient hiveClient =
-      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
-        public HiveMetaStoreClient run() throws Exception {
-          HiveMetaStoreClient hiveClient =
-            new HiveMetaStoreClient(conf);
-          return hiveClient;
-        }
-      });
-
-    Assert.assertTrue("Couldn't connect to metastore", hiveClient != null);
-
-    //try out some metastore operations
-    createDBAndVerifyExistence(hiveClient);
-
-    hiveClient.close();
-
-    //Now cancel the delegation token
-    HiveMetaStore.cancelDelegationToken(tokenStrForm);
-
-    //now metastore connection should fail
-    hiveClient =
-      clientUgi.doAs(new PrivilegedExceptionAction<HiveMetaStoreClient>() {
-        public HiveMetaStoreClient run() {
-          try {
-            return new HiveMetaStoreClient(conf);
-          } catch (MetaException e) {
-            return null;
-          }
-        }
-      });
-    Assert.assertTrue("Expected metastore operations to fail", hiveClient == null);
-  }
-
-  private void createDBAndVerifyExistence(HiveMetaStoreClient client)
-  throws Exception {
-    String dbName = "simpdb";
-    Database db = new Database();
-    db.setName(dbName);
-    client.createDatabase(db);
-    Database db1 = client.getDatabase(dbName);
-    client.dropDatabase(dbName);
-    Assert.assertTrue("Databases do not match", db1.getName().equals(db.getName()));
-  }
-
-  private int findFreePort() throws IOException {
-    ServerSocket socket= new ServerSocket(0);
-    int port = socket.getLocalPort();
-    socket.close();
-    return port;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java
index 341c7ba..5ea809f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java
@@ -31,9 +31,9 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.junit.Test;
 
 /**
@@ -58,7 +58,7 @@ public abstract class AbstractTestAuthorizationApiAuthorizer {
     hiveConf = new HiveConf();
     if (isRemoteMetastoreMode) {
       int port = MetaStoreUtils.findFreePort();
-      MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+      MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
       hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     }
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
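The remaining hunks in this commit repeat the same mechanical substitution: the Hadoop-version shim indirection is dropped in favor of the metastore's own bridge. In essence, with names taken verbatim from the hunks:

    // Before: bridge resolved through the version-specific shims.
    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
    // After: the standalone metastore supplies its own bridge, shim-free.
    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());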

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
index 1073abb..fc85d86 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionSpec;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -175,7 +175,7 @@ public class TestFilterHooks {
     UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName());
     int port = MetaStoreUtils.findFreePort();
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf);
 
     SessionState.start(new CliSessionState(hiveConf));
     msc = new HiveMetaStoreClient(hiveConf);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
index d6e4fb7..dee5d77 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
@@ -42,12 +42,12 @@ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
 import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.DropTableEvent;
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * TestHiveMetaStoreWithEnvironmentContext. Test case for _with_environment_context
@@ -74,7 +74,7 @@ public class TestHiveMetaStoreWithEnvironmentContext extends TestCase {
         DummyListener.class.getName());
 
     int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     hiveConf = new HiveConf(this.getClass());
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
index bfee539..6f0282c 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 
 
 public class TestMetaStoreAuthorization extends TestCase {
@@ -75,7 +75,7 @@ public class TestMetaStoreAuthorization extends TestCase {
 
   public void testMetaStoreAuthorization() throws Exception {
     setup();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
 
     FileSystem fs = null;

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
index 1e78ff1..4f559d0 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
@@ -25,9 +25,10 @@ import junit.framework.TestCase;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
+
 /**
  * TestMetaStoreEventListener. Test case for
  * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}
@@ -48,7 +49,7 @@ public class TestMetaStoreEndFunctionListener extends TestCase {
     System.setProperty("hive.metastore.end.function.listeners",
         DummyEndFunctionListener.class.getName());
     int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     hiveConf = new HiveConf(this.getClass());
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index f384991..82fb8c7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -63,10 +63,10 @@ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
 import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreEventContext;
 import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.processors.SetProcessor;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * TestMetaStoreEventListener. Test case for
@@ -98,7 +98,7 @@ public class TestMetaStoreEventListener extends TestCase {
     hiveConf = new HiveConf(this.getClass());
 
     hiveConf.setVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf);
 
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
index 0c3e703..3302937 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
@@ -25,9 +25,9 @@ import junit.framework.TestCase;
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * Ensure that the status of MetaStore events depend on the RawStore's commit status.
@@ -51,7 +51,7 @@ public class TestMetaStoreEventListenerOnlyOnCommit extends TestCase {
             DummyRawStoreControlledCommit.class.getName());
 
     int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     hiveConf = new HiveConf(this.getClass());
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
index e8171e5..09b5cd8 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
@@ -22,10 +22,9 @@ import junit.framework.TestCase;
 
 import org.apache.hadoop.hive.cli.CliSessionState;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * TestMetaStoreInitListener. Test case for
@@ -43,7 +42,7 @@ public class TestMetaStoreInitListener extends TestCase {
     System.setProperty("hive.metastore.init.hooks",
         DummyMetaStoreInitListener.class.getName());
     int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     hiveConf = new HiveConf(this.getClass());
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
index d074028..5f86c26 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
@@ -22,7 +22,7 @@ import junit.framework.Assert;
 import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 
 /**
  * Test for unwrapping InvocationTargetException, which is thrown from
@@ -35,7 +35,7 @@ public class TestMetaStoreListenersError extends TestCase {
     System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName());
     int port = MetaStoreUtils.findFreePort();
     try {
-      HiveMetaStore.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     } catch (Throwable throwable) {
       Assert.assertEquals(MetaException.class, throwable.getClass());
       Assert.assertEquals(
@@ -52,7 +52,7 @@ public class TestMetaStoreListenersError extends TestCase {
     System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName());
     int port = MetaStoreUtils.findFreePort();
     try {
-      HiveMetaStore.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     } catch (Throwable throwable) {
       Assert.assertEquals(MetaException.class, throwable.getClass());
       Assert.assertEquals(

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
index b462b2a..9b6cab3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java
@@ -23,9 +23,9 @@ import org.apache.hadoop.hive.common.metrics.common.MetricsConstant;
 import org.apache.hadoop.hive.common.metrics.common.MetricsFactory;
 import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -58,7 +58,7 @@ public class TestMetaStoreMetrics {
     metrics = (CodahaleMetrics) MetricsFactory.getInstance();
 
     //Increments one HMS connection
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf);
 
     //Increments one HMS connection (Hive.get())
     SessionState.start(new CliSessionState(hiveConf));

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
index 878f913..a8f9907 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hive.metastore;
 
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 
 
 public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
@@ -44,7 +44,7 @@ public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
 
     port = MetaStoreUtils.findFreePort();
     System.out.println("Starting MetaStore Server on port " + port);
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), hiveConf);
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), hiveConf);
     isServerStarted = true;
 
     // This is default case with setugi off for both client and server

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
index 63eea27..31580fc 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
@@ -23,7 +23,7 @@ import junit.framework.TestCase;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -52,7 +52,7 @@ public class TestRemoteHiveMetaStoreIpAddress extends TestCase {
     System.out.println("Starting MetaStore Server on port " + port);
     System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname,
         IpAddressListener.class.getName());
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     isServerStarted = true;
 
     // This is default case with setugi off for both client and server

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
index 99c09b7..a7e5eb3 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRetryingHMSHandler.java
@@ -33,11 +33,11 @@ import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * TestRetryingHMSHandler. Test case for
@@ -54,7 +54,7 @@ public class TestRetryingHMSHandler extends TestCase {
     System.setProperty("hive.metastore.pre.event.listeners",
         AlternateFailurePreListener.class.getName());
     int port = MetaStoreUtils.findFreePort();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
     hiveConf = new HiveConf(this.getClass());
     hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
     hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
index ce8fe60..36fa81e 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java
@@ -31,11 +31,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
 import org.apache.hadoop.hive.ql.metadata.*;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * Tests DDL with remote metastore service and second namenode (HIVE-6374)
@@ -81,7 +81,7 @@ public class TestDDLWithRemoteMetastoreSecondNamenode extends TestCase {
 
       // Test with remote metastore service
       int port = MetaStoreUtils.findFreePort();
-      MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+      MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
       conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
       conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
       conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java
index 36f31f4..ccf835f 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java
@@ -30,14 +30,13 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
 import org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -75,7 +74,7 @@ public class StorageBasedMetastoreTestBase {
         InjectableDummyAuthenticator.class.getName());
 
     clientHiveConf = createHiveConf();
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge(), clientHiveConf);
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), clientHiveConf);
 
     // Turn off client-side authorization
     clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
index b0da884..3b05731 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java
@@ -32,11 +32,11 @@ import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * TestAuthorizationPreEventListener. Test case for
@@ -62,7 +62,7 @@ public class TestAuthorizationPreEventListener extends TestCase {
     System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
         HadoopDefaultMetastoreAuthenticator.class.getName());
 
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     clientHiveConf = new HiveConf(this.getClass());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
index d895da8..24e49ba 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java
@@ -29,11 +29,11 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -64,7 +64,7 @@ public class TestClientSideAuthorizationProvider extends TestCase {
     System.setProperty(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname,
         "");
 
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     clientHiveConf = new HiveConf(this.getClass());
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
index 19dc9cf..8b90fa8 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventLis
 import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
 import org.apache.hadoop.security.UserGroupInformation;
 
@@ -99,7 +99,7 @@ public class TestMetastoreAuthorizationProvider extends TestCase {
     System.setProperty(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, "");
 
 
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     clientHiveConf = createHiveConf();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java
index 5c9bf05..3d9247d 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.Driver;
 import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext;
 import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -62,7 +62,7 @@ public class TestMultiAuthorizationPreEventListener {
     System.setProperty(HiveConf.ConfVars.HIVE_METASTORE_AUTHENTICATOR_MANAGER.varname,
         HadoopDefaultMetastoreAuthenticator.class.getName());
 
-    MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
+    MetaStoreUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
 
     clientHiveConf = new HiveConf();
 

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
----------------------------------------------------------------------
diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
index 71f9640..06ddc22 100644
--- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
+++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.llap.LlapItUtils;
 import org.apache.hadoop.hive.llap.daemon.MiniLlapCluster;
 import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.ql.exec.Utilities;
 import org.apache.hadoop.hive.ql.util.ZooKeeperHiveHelper;
 import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim;
@@ -311,8 +312,7 @@ public class MiniHS2 extends AbstractHiveService {
     if (isMetastoreRemote) {
       int metaStorePort = MetaStoreUtils.findFreePort();
       getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort);
-      MetaStoreUtils.startMetaStore(metaStorePort,
-      ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
+      MetaStoreUtils.startMetaStore(metaStorePort, HadoopThriftAuthBridge.getBridge(), getHiveConf());
     }
 
     hiveServer2 = new HiveServer2();

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 370c169..df01b25 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HdfsUtils;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
 import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
 import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
@@ -127,17 +128,14 @@ import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
 import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler;
 import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
-import org.apache.hadoop.hive.shims.HadoopShims;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
-import org.apache.hadoop.hive.thrift.HiveDelegationTokenManager;
-import org.apache.hadoop.hive.thrift.TUGIContainingTransport;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -204,7 +202,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
   public static final String PUBLIC = "public";
 
   private static HadoopThriftAuthBridge.Server saslServer;
-  private static HiveDelegationTokenManager delegationTokenManager;
+  private static MetastoreDelegationTokenManager delegationTokenManager;
   private static boolean useSasl;
 
   public static final String NO_FILTER_STRING = "";
@@ -2342,9 +2340,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         // This is not transactional
         for (Path location : getLocationsForTruncate(getMS(), dbName, tableName, tbl, partNames)) {
           FileSystem fs = location.getFileSystem(getHiveConf());
-          HadoopShims.HdfsEncryptionShim shim
-                  = ShimLoader.getHadoopShims().createHdfsEncryptionShim(fs, getHiveConf());
-          if (!shim.isPathEncrypted(location) && !FileUtils.pathHasSnapshotSubDir(location, fs)) {
+          if (!org.apache.hadoop.hive.metastore.utils.HdfsUtils.isPathEncrypted(getHiveConf(), fs.getUri(), location) &&
+              !FileUtils.pathHasSnapshotSubDir(location, fs)) {
             HdfsUtils.HadoopFileStatus status = new HdfsUtils.HadoopFileStatus(getHiveConf(), fs, location);
             FileStatus targetStatus = fs.getFileStatus(location);
             String targetGroup = targetStatus == null ? null : targetStatus.getGroup();
@@ -7565,7 +7562,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Condition startCondition = startLock.newCondition();
       AtomicBoolean startedServing = new AtomicBoolean();
       startMetaStoreThreads(conf, startLock, startCondition, startedServing);
-      startMetaStore(cli.getPort(), ShimLoader.getHadoopThriftAuthBridge(), conf, startLock,
+      startMetaStore(cli.getPort(), HadoopThriftAuthBridge.getBridge(), conf, startLock,
           startCondition, startedServing);
     } catch (Throwable t) {
       // Catch the exception, log it and rethrow it.
@@ -7650,9 +7647,9 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE),
             conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL));
         // Start delegation token manager
-        delegationTokenManager = new HiveDelegationTokenManager();
+        delegationTokenManager = new MetastoreDelegationTokenManager();
         delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler,
-            ServerMode.METASTORE);
+            HadoopThriftAuthBridge.Server.ServerMode.METASTORE);
         saslServer.setSecretManager(delegationTokenManager.getSecretManager());
         transFactory = saslServer.createTransportFactory(
                 MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
@@ -7686,8 +7683,8 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname
               + " Not configured for SSL connection");
         }
-        String keyStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
-            HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
+        String keyStorePassword =
+            MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_KEYSTORE_PASSWORD);
 
         // enable SSL support for HMS
         List<String> sslVersionBlacklist = new ArrayList<String>();

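The keystore password is now resolved through MetastoreConf.getPassword rather than the Hadoop shims. Assuming it mirrors the old shim behaviour, the lookup reduces to Hadoop's credential-provider API with a fallback to the in-config value; a hedged sketch (class and method shape are hypothetical, not the actual MetastoreConf code):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    class PasswordLookupSketch {
      // Hypothetical equivalent of MetastoreConf.getPassword(): prefer a
      // credential-provider entry, fall back to the plain configuration value.
      static String getPassword(Configuration conf, String key) throws IOException {
        char[] pw = conf.getPassword(key);  // consults hadoop.security.credential.provider.path
        return pw == null ? null : new String(pw);
      }
    }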
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index b37c677..b1efb8c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -60,11 +60,11 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.hive.shims.Utils;
-import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.thrift.TApplicationException;
@@ -194,7 +194,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
           metastoreUris[i++] = new URI(
               tmpUri.getScheme(),
               tmpUri.getUserInfo(),
-              ShimLoader.getHadoopThriftAuthBridge().getCanonicalHostName(tmpUri.getHost()),
+              HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
               tmpUri.getPort(),
               tmpUri.getPath(),
               tmpUri.getQuery(),
@@ -422,8 +422,8 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
                 throw new IllegalArgumentException(ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PATH.varname
                     + " Not configured for SSL connection");
               }
-              String trustStorePassword = ShimLoader.getHadoopShims().getPassword(conf,
-                  HiveConf.ConfVars.HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD.varname);
+              String trustStorePassword =
+                  MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
 
               // Create an SSL socket and connect
               transport = HiveAuthUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout, trustStorePath, trustStorePassword );
@@ -442,7 +442,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
             // Wrap thrift connection with SASL for secure connection.
             try {
               HadoopThriftAuthBridge.Client authBridge =
-                ShimLoader.getHadoopThriftAuthBridge().createClient();
+                HadoopThriftAuthBridge.getBridge().createClient();
 
               // check if we should use delegation tokens to authenticate
               // the call below gets hold of the tokens if they are set up by hadoop

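With the bridge reachable directly, a secure client wires up its transport as sketched below. The createClientTransport signature is the one added in HadoopThriftAuthBridge later in this patch; the principal, host, port, and class name here are illustrative placeholders:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    class SecureClientTransportSketch {
      static TTransport open() throws Exception {
        HadoopThriftAuthBridge.Client authBridge = HadoopThriftAuthBridge.getBridge().createClient();
        Map<String, String> saslProps = new HashMap<>();  // e.g. from getHadoopSaslProperties(conf)
        return authBridge.createClientTransport(
            "hive/_HOST@EXAMPLE.COM",          // hypothetical server principal
            "metastore-host.example.com",      // hypothetical metastore host
            "KERBEROS", null,                  // Kerberos auth; no delegation token string
            new TSocket("metastore-host.example.com", 9083),
            saslProps);
      }
    }

The returned transport still needs to be opened; the SASL negotiation happens at that point.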

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
index 6a54306..bbe13fd 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
@@ -65,6 +65,8 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.metastore.api.Decimal;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.apache.hadoop.hive.shims.ShimLoader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -108,8 +110,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.shims.ShimLoader;
-import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.hive.common.util.ReflectionUtil;
@@ -1258,7 +1258,7 @@ public class MetaStoreUtils {
   }
 
   public static int startMetaStore() throws Exception {
-    return startMetaStore(ShimLoader.getHadoopThriftAuthBridge(), null);
+    return startMetaStore(HadoopThriftAuthBridge.getBridge(), null);
   }
 
   public static int startMetaStore(final HadoopThriftAuthBridge bridge, HiveConf conf) throws Exception {
@@ -1268,7 +1268,7 @@ public class MetaStoreUtils {
   }
 
   public static int startMetaStore(HiveConf conf) throws Exception {
-    return startMetaStore(ShimLoader.getHadoopThriftAuthBridge(), conf);
+    return startMetaStore(HadoopThriftAuthBridge.getBridge(), conf);
   }
 
   public static void startMetaStore(final int port, final HadoopThriftAuthBridge bridge) throws Exception {

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 897fc4e..b878115 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -121,6 +121,7 @@ import org.apache.hadoop.hive.metastore.api.Type;
 import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 import org.apache.hadoop.hive.metastore.model.MConstraint;
 import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
@@ -157,7 +158,6 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.apache.thrift.TException;
@@ -493,8 +493,7 @@ public class ObjectStore implements RawStore, Configurable {
     }
     // Password may no longer be in the conf, use getPassword()
     try {
-      String passwd =
-          ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
+      String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
       if (passwd != null && !passwd.isEmpty()) {
         prop.setProperty(HiveConf.ConfVars.METASTOREPWD.varname, passwd);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
index 89f4701..64f0b96 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TUGIBasedProcessor.java
@@ -25,13 +25,13 @@ import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_args;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.set_ugi_result;
-import org.apache.hadoop.hive.thrift.TUGIContainingTransport;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.thrift.ProcessFunction;
 import org.apache.thrift.TApplicationException;

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
index 778550b..1dd50de 100755
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/Warehouse.java
@@ -33,6 +33,7 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -52,7 +53,6 @@ import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -252,7 +252,7 @@ public class Warehouse {
     try {
       fs = getFs(path);
       stat = fs.getFileStatus(path);
-      ShimLoader.getHadoopShims().checkFileAccess(fs, stat, FsAction.WRITE);
+      HdfsUtils.checkFileAccess(fs, stat, FsAction.WRITE);
       return true;
     } catch (FileNotFoundException fnfe){
       // File named by path doesn't exist; nothing to validate.

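HdfsUtils.checkFileAccess replaces the shim call here; its body is not shown in this patch. Under the assumption that it matches the old shim semantics, the check can be expressed with FileSystem.access() (Hadoop 2.6+), which throws when the caller lacks the requested permission. A hedged sketch (the real HdfsUtils.checkFileAccess also takes a FileStatus, as the hunk above shows):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    class FileAccessCheckSketch {
      static boolean isWritable(FileSystem fs, Path path) throws IOException {
        try {
          fs.access(path, FsAction.WRITE);  // throws AccessControlException when denied
          return true;
        } catch (AccessControlException ace) {
          return false;
        }
      }
    }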
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index f72c379..0161894 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@ -28,10 +28,10 @@ import java.sql.Statement;
 import java.util.Properties;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.shims.ShimLoader;
 
 /**
  * Utility methods for creating and destroying txn database/schema, plus methods for
@@ -328,8 +328,7 @@ public final class TxnDbUtil {
     Properties prop = new Properties();
     String driverUrl = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORECONNECTURLKEY);
     String user = HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME);
-    String passwd =
-      ShimLoader.getHadoopShims().getPassword(conf, HiveConf.ConfVars.METASTOREPWD.varname);
+    String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
     prop.setProperty("user", user);
     prop.setProperty("password", passwd);
     Connection conn = driver.connect(driverUrl, prop);

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index b722af6..f3968e4 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hive.common.classification.RetrySemantics;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.HouseKeeperService;
 import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.commons.dbcp.PoolingDataSource;
@@ -45,7 +46,6 @@ import org.apache.hadoop.hive.common.StringableMap;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConfUtil;
 import org.apache.hadoop.hive.metastore.api.*;
-import org.apache.hadoop.hive.shims.ShimLoader;
 import org.apache.hadoop.util.StringUtils;
 
 import javax.sql.DataSource;
@@ -3701,8 +3701,7 @@ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
 
   private static String getMetastoreJdbcPasswd(HiveConf conf) throws SQLException {
     try {
-      return ShimLoader.getHadoopShims().getPassword(conf,
-          HiveConf.ConfVars.METASTOREPWD.varname);
+      return MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
     } catch (IOException err) {
       throw new SQLException("Error getting metastore password", err);
     }

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index b8826c6..2e0c51e 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -53,6 +53,22 @@
         </exclusion>
       </exclusions>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
     <!-- This is our one and only Hive dependency.-->
     <dependency>
       <groupId>org.apache.hive</groupId>

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index d8ec1d9..0fb878a 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore.conf;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
 import org.apache.hadoop.hive.metastore.utils.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -361,6 +362,17 @@ public class MetastoreConf {
         "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
             "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
             "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
+    DELEGATION_KEY_UPDATE_INTERVAL("metastore.cluster.delegation.key.update-interval",
+        "hive.cluster.delegation.key.update-interval", 1, TimeUnit.DAYS, "Interval for rolling the delegation token master key"),
+    DELEGATION_TOKEN_GC_INTERVAL("metastore.cluster.delegation.token.gc-interval",
+        "hive.cluster.delegation.token.gc-interval", 1, TimeUnit.HOURS, "Interval for removing expired delegation tokens"),
+    DELEGATION_TOKEN_MAX_LIFETIME("metastore.cluster.delegation.token.max-lifetime",
+        "hive.cluster.delegation.token.max-lifetime", 7, TimeUnit.DAYS, "Maximum lifetime of a delegation token"),
+    DELEGATION_TOKEN_RENEW_INTERVAL("metastore.cluster.delegation.token.renew-interval",
+        "hive.cluster.delegation.token.renew-interval", 1, TimeUnit.DAYS, "Interval within which a delegation token must be renewed"),
+    DELEGATION_TOKEN_STORE_CLS("metastore.cluster.delegation.token.store.class",
+        "hive.cluster.delegation.token.store.class", MetastoreDelegationTokenManager.class.getName(),
+        "Class to store delegation tokens"),
     DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit",
         "javax.jdo.option.DetachAllOnCommit", true,
         "Detaches all objects from session so that they can be used after transaction is committed"),

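Each new ConfVar above carries both a metastore.* name and its legacy hive.* alias. A hedged sketch of the lookup convention this implies (the actual MetastoreConf resolution logic is not part of this hunk; class and method are hypothetical):

    import org.apache.hadoop.conf.Configuration;

    class DualKeyLookupSketch {
      // Prefer the new metastore.* key; fall back to the legacy hive.* key.
      static String get(Configuration conf, String metastoreKey, String hiveKey, String deflt) {
        String v = conf.get(metastoreKey);
        if (v == null) v = conf.get(hiveKey);
        return v == null ? deflt : v;
      }
    }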
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java
new file mode 100644
index 0000000..ba6c7e3
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenIdentifier.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+
+/**
+ * A delegation token identifier that is specific to Hive.
+ */
+public class DelegationTokenIdentifier
+    extends AbstractDelegationTokenIdentifier {
+  public static final Text HIVE_DELEGATION_KIND = new Text("HIVE_DELEGATION_TOKEN");
+
+  /**
+   * Create an empty delegation token identifier for reading into.
+   */
+  public DelegationTokenIdentifier() {
+  }
+
+  /**
+   * Create a new delegation token identifier
+   * @param owner the effective username of the token owner
+   * @param renewer the username of the renewer
+   * @param realUser the real username of the token owner
+   */
+  public DelegationTokenIdentifier(Text owner, Text renewer, Text realUser) {
+    super(owner, renewer, realUser);
+  }
+
+  @Override
+  public Text getKind() {
+    return HIVE_DELEGATION_KIND;
+  }
+
+}

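A short usage sketch: tokens of this kind round-trip through Hadoop's Token class, and the kind text distinguishes them from other token types. The wrapper class is hypothetical and assumes it lives in the same package as DelegationTokenIdentifier:

    import java.io.IOException;
    import org.apache.hadoop.security.token.Token;

    class TokenKindCheckSketch {
      static boolean isHiveToken(String tokenStrForm) throws IOException {
        Token<DelegationTokenIdentifier> t = new Token<>();
        t.decodeFromUrlString(tokenStrForm);  // throws IOException on malformed input
        return DelegationTokenIdentifier.HIVE_DELEGATION_KIND.equals(t.getKind());
      }
    }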
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java
new file mode 100644
index 0000000..aae96a5
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenSecretManager.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
+
+/**
+ * A Hive-specific delegation token secret manager.
+ * The secret manager is responsible for generating and accepting the password
+ * for each token.
+ */
+public class DelegationTokenSecretManager
+    extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
+
+  /**
+   * Create a secret manager.
+   * @param delegationKeyUpdateInterval the interval, in milliseconds, at which
+   *        new secret keys are rolled
+   * @param delegationTokenMaxLifetime the maximum lifetime of the delegation
+   *        tokens, in milliseconds
+   * @param delegationTokenRenewInterval how often the tokens must be renewed, in milliseconds
+   * @param delegationTokenRemoverScanInterval how often the tokens are scanned
+   *        for expiry, in milliseconds
+   */
+  public DelegationTokenSecretManager(long delegationKeyUpdateInterval,
+                                      long delegationTokenMaxLifetime,
+                                      long delegationTokenRenewInterval,
+                                      long delegationTokenRemoverScanInterval) {
+    super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
+          delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
+  }
+
+  @Override
+  public DelegationTokenIdentifier createIdentifier() {
+    return new DelegationTokenIdentifier();
+  }
+
+  /**
+   * Verify a delegation token given in its URL-encoded string form.
+   * @param tokenStrForm the token, as produced by Token#encodeToUrlString()
+   * @return the short user name of the token owner
+   * @throws IOException if the token cannot be decoded or fails verification
+   */
+  public synchronized String verifyDelegationToken(String tokenStrForm) throws IOException {
+    Token<DelegationTokenIdentifier> t = new Token<>();
+    t.decodeFromUrlString(tokenStrForm);
+
+    DelegationTokenIdentifier id = getTokenIdentifier(t);
+    verifyToken(id, t.getPassword());
+    return id.getUser().getShortUserName();
+  }
+
+  protected DelegationTokenIdentifier getTokenIdentifier(Token<DelegationTokenIdentifier> token)
+      throws IOException {
+    // turn bytes back into identifier for cache lookup
+    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+    DataInputStream in = new DataInputStream(buf);
+    DelegationTokenIdentifier id = createIdentifier();
+    id.readFields(in);
+    return id;
+  }
+
+  public synchronized void cancelDelegationToken(String tokenStrForm) throws IOException {
+    Token<DelegationTokenIdentifier> t= new Token<>();
+    t.decodeFromUrlString(tokenStrForm);
+    String user = UserGroupInformation.getCurrentUser().getUserName();
+    cancelToken(t, user);
+  }
+
+  public synchronized long renewDelegationToken(String tokenStrForm) throws IOException {
+    Token<DelegationTokenIdentifier> t= new Token<>();
+    t.decodeFromUrlString(tokenStrForm);
+    String user = UserGroupInformation.getCurrentUser().getUserName();
+    return renewToken(t, user);
+  }
+
+  public synchronized String getDelegationToken(String renewer) throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    Text owner = new Text(ugi.getUserName());
+    Text realUser = null;
+    if (ugi.getRealUser() != null) {
+      realUser = new Text(ugi.getRealUser().getUserName());
+    }
+    DelegationTokenIdentifier ident =
+      new DelegationTokenIdentifier(owner, new Text(renewer), realUser);
+    Token<DelegationTokenIdentifier> t = new Token<>(
+        ident, this);
+    return t.encodeToUrlString();
+  }
+
+  public String getUserFromToken(String tokenStr) throws IOException {
+    Token<DelegationTokenIdentifier> delegationToken = new Token<>();
+    delegationToken.decodeFromUrlString(tokenStr);
+
+    ByteArrayInputStream buf = new ByteArrayInputStream(delegationToken.getIdentifier());
+    DataInputStream in = new DataInputStream(buf);
+    DelegationTokenIdentifier id = createIdentifier();
+    id.readFields(in);
+    return id.getUser().getShortUserName();
+  }
+}
+

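A hedged lifecycle sketch for this class, with interval values chosen to mirror the DELEGATION_* defaults added to MetastoreConf earlier in this patch. The base class expects milliseconds, and startThreads()/stopThreads() are inherited from Hadoop's AbstractDelegationTokenSecretManager; the renewer name is a placeholder:

    import java.util.concurrent.TimeUnit;

    class SecretManagerLifecycleSketch {
      static void demo() throws Exception {
        DelegationTokenSecretManager mgr = new DelegationTokenSecretManager(
            TimeUnit.DAYS.toMillis(1),    // key update interval
            TimeUnit.DAYS.toMillis(7),    // max token lifetime
            TimeUnit.DAYS.toMillis(1),    // renew interval
            TimeUnit.HOURS.toMillis(1));  // remover scan interval
        mgr.startThreads();
        String tok = mgr.getDelegationToken("renewerUser");  // issue for a hypothetical renewer
        String owner = mgr.verifyDelegationToken(tok);       // short name of the token owner
        mgr.renewDelegationToken(tok);  // succeeds only if the current user may renew
        mgr.cancelDelegationToken(tok);
        mgr.stopThreads();
      }
    }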
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java
new file mode 100644
index 0000000..0cafeff
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/DelegationTokenStore.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.Closeable;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge.Server.ServerMode;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+
+/**
+ * Interface for a pluggable token store that can be implemented with shared external
+ * storage for load balancing and high availability (for example using ZooKeeper).
+ * Internal, store-specific errors are translated into {@link TokenStoreException}.
+ */
+public interface DelegationTokenStore extends Configurable, Closeable {
+
+  /**
+   * Exception for internal token store errors that typically cannot be handled by the caller.
+   */
+  class TokenStoreException extends RuntimeException {
+    private static final long serialVersionUID = -8693819817623074083L;
+
+    public TokenStoreException(Throwable cause) {
+      super(cause);
+    }
+
+    public TokenStoreException(String message, Throwable cause) {
+      super(message, cause);
+    }
+  }
+
+  /**
+   * Add new master key. The token store assigns and returns the sequence number.
+   * Caller needs to use the identifier to update the key (since it is embedded in the key).
+   *
+   * @param s the master key to add
+   * @return the sequence number assigned to the new key
+   */
+  int addMasterKey(String s) throws TokenStoreException;
+
+  /**
+   * Update master key (for expiration and setting store assigned sequence within key)
+   * @param keySeq sequence number of the key to update
+   * @param s the updated master key
+   * @throws TokenStoreException
+   */
+  void updateMasterKey(int keySeq, String s) throws TokenStoreException;
+
+  /**
+   * Remove key for given id.
+   * @param keySeq sequence number of the key to remove
+   * @return false if key no longer present, true otherwise.
+   */
+  boolean removeMasterKey(int keySeq);
+
+  /**
+   * Return all master keys.
+   * @return all master keys currently in the store
+   * @throws TokenStoreException
+   */
+  String[] getMasterKeys() throws TokenStoreException;
+
+  /**
+   * Add token. If identifier is already present, token won't be added.
+   * @param tokenIdentifier identifier of the token to add
+   * @param token the token information to store
+   * @return true if token was added, false for existing identifier
+   */
+  boolean addToken(DelegationTokenIdentifier tokenIdentifier,
+      DelegationTokenInformation token) throws TokenStoreException;
+
+  /**
+   * Get token. Returns null if the token does not exist.
+   * @param tokenIdentifier identifier of the token to look up
+   * @return the token information, or null if the token does not exist
+   */
+  DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier)
+      throws TokenStoreException;
+
+  /**
+   * Remove token. Return value can be used by caller to detect concurrency.
+   * @param tokenIdentifier identifier of the token to remove
+   * @return true if token was removed, false if it was already removed.
+   * @throws TokenStoreException
+   */
+  boolean removeToken(DelegationTokenIdentifier tokenIdentifier) throws TokenStoreException;
+
+  /**
+   * List all token identifiers in the store. This is used to remove expired tokens;
+   * a potential scalability improvement would be to partition by master key id.
+   * @return all delegation token identifiers currently in the store
+   */
+  List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() throws TokenStoreException;
+
+  /**
+   * @param hmsHandler ObjectStore used by DBTokenStore
+   * @param smode indicates whether this is a metastore or hiveserver2 token store
+   */
+  void init(Object hmsHandler, ServerMode smode);
+
+}

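For illustration, a hedged sketch of the smallest possible implementation of this interface, keeping everything in process memory. The class name is hypothetical; the shipped implementations back this with a database or ZooKeeper instead, which is the whole point of the abstraction:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.atomic.AtomicInteger;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge.Server.ServerMode;
    import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;

    // Hypothetical in-memory store; no HA, for illustration only.
    public class InMemoryTokenStoreSketch implements DelegationTokenStore {
      private final ConcurrentHashMap<Integer, String> masterKeys = new ConcurrentHashMap<>();
      private final ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation> tokens =
          new ConcurrentHashMap<>();
      private final AtomicInteger seq = new AtomicInteger();
      private Configuration conf;

      @Override public int addMasterKey(String s) {
        int keySeq = seq.getAndIncrement();
        masterKeys.put(keySeq, s);
        return keySeq;
      }
      @Override public void updateMasterKey(int keySeq, String s) { masterKeys.put(keySeq, s); }
      @Override public boolean removeMasterKey(int keySeq) { return masterKeys.remove(keySeq) != null; }
      @Override public String[] getMasterKeys() { return masterKeys.values().toArray(new String[0]); }
      @Override public boolean addToken(DelegationTokenIdentifier id, DelegationTokenInformation t) {
        return tokens.putIfAbsent(id, t) == null;
      }
      @Override public DelegationTokenInformation getToken(DelegationTokenIdentifier id) {
        return tokens.get(id);
      }
      @Override public boolean removeToken(DelegationTokenIdentifier id) {
        return tokens.remove(id) != null;
      }
      @Override public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
        return new ArrayList<>(tokens.keySet());
      }
      @Override public void init(Object hmsHandler, ServerMode smode) { /* nothing to do */ }
      @Override public void setConf(Configuration conf) { this.conf = conf; }
      @Override public Configuration getConf() { return conf; }
      @Override public void close() throws IOException { /* nothing to release */ }
    }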
http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java
new file mode 100644
index 0000000..3f02ffd
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge.java
@@ -0,0 +1,684 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.security;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.Socket;
+import java.net.UnknownHostException;
+import java.security.PrivilegedAction;
+import java.security.PrivilegedExceptionAction;
+import java.util.Locale;
+import java.util.Map;
+
+import javax.security.auth.callback.Callback;
+import javax.security.auth.callback.CallbackHandler;
+import javax.security.auth.callback.NameCallback;
+import javax.security.auth.callback.PasswordCallback;
+import javax.security.auth.callback.UnsupportedCallbackException;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.RealmCallback;
+import javax.security.sasl.RealmChoiceCallback;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+
+import org.apache.commons.codec.binary.Base64;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.thrift.TException;
+import org.apache.thrift.TProcessor;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.transport.TSaslClientTransport;
+import org.apache.thrift.transport.TSaslServerTransport;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+import org.apache.thrift.transport.TTransportFactory;
+
+/**
+ * Functions that bridge Thrift's SASL transports to Hadoop's
+ * SASL callback handlers and authentication classes.
+ * HIVE-11378: this class is not used directly anymore.  It now exists only as a
+ * shell to be extended by HadoopThriftAuthBridge23, and is kept abstract to
+ * avoid maintenance errors.
+ */
+public abstract class HadoopThriftAuthBridge {
+  private static final Logger LOG = LoggerFactory.getLogger(HadoopThriftAuthBridge.class);
+
+  // We want to have only one auth bridge.  In the past this was handled by ShimLoader, but since
+  // we're no longer using that we'll do it here.
+  private static HadoopThriftAuthBridge self = null;
+
+  public static HadoopThriftAuthBridge getBridge() {
+    if (self == null) {
+      synchronized (HadoopThriftAuthBridge.class) {
+        if (self == null) self = new HadoopThriftAuthBridge23();
+      }
+    }
+    return self;
+  }
+
+  public Client createClient() {
+    return new Client();
+  }
+
+  public Client createClientWithConf(String authMethod) {
+    UserGroupInformation ugi;
+    try {
+      ugi = UserGroupInformation.getLoginUser();
+    } catch(IOException e) {
+      throw new IllegalStateException("Unable to get current login user: " + e, e);
+    }
+    if (loginUserHasCurrentAuthMethod(ugi, authMethod)) {
+      LOG.debug("Not setting UGI conf as passed-in authMethod of " + authMethod + " = current.");
+      return new Client();
+    } else {
+      LOG.debug("Setting UGI conf as passed-in authMethod of " + authMethod + " != current.");
+      Configuration conf = new Configuration();
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, authMethod);
+      UserGroupInformation.setConfiguration(conf);
+      return new Client();
+    }
+  }
+
+  public Server createServer(String keytabFile, String principalConf) throws TTransportException {
+    return new Server(keytabFile, principalConf);
+  }
+
+
+  public String getServerPrincipal(String principalConfig, String host)
+      throws IOException {
+    String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host);
+    String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
+    if (names.length != 3) {
+      throw new IOException(
+          "Kerberos principal name does NOT have the expected hostname part: "
+              + serverPrincipal);
+    }
+    return serverPrincipal;
+  }
+
+  /**
+   * Get the canonicalized hostname for a given hostname (possibly a CNAME).
+   * This allows service principals to use simplified CNAMEs.
+   * @param hostName The hostname to be canonicalized.
+   * @return The canonicalized hostname for a CNAME; if it cannot be resolved, the original hostname.
+   */
+  public String getCanonicalHostName(String hostName) {
+    try {
+      return InetAddress.getByName(hostName).getCanonicalHostName();
+    }
+    catch(UnknownHostException exception) {
+      LOG.warn("Could not retrieve canonical hostname for " + hostName, exception);
+      return hostName;
+    }
+  }
+
+  public UserGroupInformation getCurrentUGIWithConf(String authMethod)
+      throws IOException {
+    UserGroupInformation ugi;
+    try {
+      ugi = UserGroupInformation.getCurrentUser();
+    } catch(IOException e) {
+      throw new IllegalStateException("Unable to get current user: " + e, e);
+    }
+    if (loginUserHasCurrentAuthMethod(ugi, authMethod)) {
+      LOG.debug("Not setting UGI conf as passed-in authMethod of " + authMethod + " = current.");
+      return ugi;
+    } else {
+      LOG.debug("Setting UGI conf as passed-in authMethod of " + authMethod + " != current.");
+      Configuration conf = new Configuration();
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, authMethod);
+      UserGroupInformation.setConfiguration(conf);
+      return UserGroupInformation.getCurrentUser();
+    }
+  }
+
+  /**
+   * Return true if the current login user is already using the given authMethod.
+   *
+   * Used above to ensure we do not create a new Configuration object and as such
+   * lose other settings such as the cluster to which the JVM is connected. Required
+   * for oozie since it does not have a core-site.xml see HIVE-7682
+   */
+  private boolean loginUserHasCurrentAuthMethod(UserGroupInformation ugi, String sAuthMethod) {
+    AuthenticationMethod authMethod;
+    try {
+      // based on SecurityUtil.getAuthenticationMethod()
+      authMethod = Enum.valueOf(AuthenticationMethod.class, sAuthMethod.toUpperCase(Locale.ENGLISH));
+    } catch (IllegalArgumentException iae) {
+      throw new IllegalArgumentException("Invalid attribute value for " +
+          HADOOP_SECURITY_AUTHENTICATION + " of " + sAuthMethod, iae);
+    }
+    LOG.debug("Current authMethod = " + ugi.getAuthenticationMethod());
+    return ugi.getAuthenticationMethod().equals(authMethod);
+  }
+
+
+  /**
+   * Read and return the Hadoop SASL configuration, which can be set via
+   * "hadoop.rpc.protection".
+   * @param conf the configuration to read from
+   * @return Hadoop SASL configuration
+   */
+
+  public abstract Map<String, String> getHadoopSaslProperties(Configuration conf);
+
+  public static class Client {
+    /**
+     * Create a client-side SASL transport that wraps an underlying transport.
+     *
+     * @param methodStr The authentication method to use; KERBEROS and DIGEST
+     *               (delegation token) are supported.
+     * @param principalConfig The Kerberos principal of the target server.
+     * @param host The host against which the principal is resolved.
+     * @param tokenStrForm URL-encoded delegation token string, required for DIGEST.
+     * @param underlyingTransport The underlying transport mechanism, usually a TSocket.
+     * @param saslProps the SASL properties to create the client with
+     */
+
+
+    public TTransport createClientTransport(
+        String principalConfig, String host,
+        String methodStr, String tokenStrForm, final TTransport underlyingTransport,
+        final Map<String, String> saslProps) throws IOException {
+      final AuthMethod method = AuthMethod.valueOf(AuthMethod.class, methodStr);
+
+      TTransport saslTransport = null;
+      switch (method) {
+      case DIGEST:
+        Token<DelegationTokenIdentifier> t= new Token<>();
+        t.decodeFromUrlString(tokenStrForm);
+        saslTransport = new TSaslClientTransport(
+            method.getMechanismName(),
+            null,
+            null, SaslRpcServer.SASL_DEFAULT_REALM,
+            saslProps, new SaslClientCallbackHandler(t),
+            underlyingTransport);
+        return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
+
+      case KERBEROS:
+        String serverPrincipal = SecurityUtil.getServerPrincipal(principalConfig, host);
+        final String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
+        if (names.length != 3) {
+          throw new IOException(
+              "Kerberos principal name does NOT have the expected hostname part: "
+                  + serverPrincipal);
+        }
+        try {
+          return UserGroupInformation.getCurrentUser().doAs(
+              new PrivilegedExceptionAction<TUGIAssumingTransport>() {
+                @Override
+                public TUGIAssumingTransport run() throws IOException {
+                  TTransport saslTransport = new TSaslClientTransport(
+                    method.getMechanismName(),
+                    null,
+                    names[0], names[1],
+                    saslProps, null,
+                    underlyingTransport);
+                  return new TUGIAssumingTransport(saslTransport, UserGroupInformation.getCurrentUser());
+                }
+              });
+        } catch (InterruptedException | SaslException se) {
+          throw new IOException("Could not instantiate SASL transport", se);
+        }
+
+      default:
+        throw new IOException("Unsupported authentication method: " + method);
+      }
+    }
+    private static class SaslClientCallbackHandler implements CallbackHandler {
+      private final String userName;
+      private final char[] userPassword;
+
+      public SaslClientCallbackHandler(Token<? extends TokenIdentifier> token) {
+        this.userName = encodeIdentifier(token.getIdentifier());
+        this.userPassword = encodePassword(token.getPassword());
+      }
+
+
+      @Override
+      public void handle(Callback[] callbacks)
+          throws UnsupportedCallbackException {
+        NameCallback nc = null;
+        PasswordCallback pc = null;
+        RealmCallback rc = null;
+        for (Callback callback : callbacks) {
+          if (callback instanceof RealmChoiceCallback) {
+            continue;
+          } else if (callback instanceof NameCallback) {
+            nc = (NameCallback) callback;
+          } else if (callback instanceof PasswordCallback) {
+            pc = (PasswordCallback) callback;
+          } else if (callback instanceof RealmCallback) {
+            rc = (RealmCallback) callback;
+          } else {
+            throw new UnsupportedCallbackException(callback,
+                "Unrecognized SASL client callback");
+          }
+        }
+        if (nc != null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("SASL client callback: setting username: " + userName);
+          }
+          nc.setName(userName);
+        }
+        if (pc != null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("SASL client callback: setting userPassword");
+          }
+          pc.setPassword(userPassword);
+        }
+        if (rc != null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("SASL client callback: setting realm: "
+                + rc.getDefaultText());
+          }
+          rc.setText(rc.getDefaultText());
+        }
+      }
+
+      static String encodeIdentifier(byte[] identifier) {
+        return new String(Base64.encodeBase64(identifier));
+      }
+
+      static char[] encodePassword(byte[] password) {
+        return new String(Base64.encodeBase64(password)).toCharArray();
+      }
+    }
+  }
+
+  public static class Server {
+    public enum ServerMode {
+      HIVESERVER2, METASTORE
+    };
+
+    protected final UserGroupInformation realUgi;
+    protected DelegationTokenSecretManager secretManager;
+
+    public Server() throws TTransportException {
+      try {
+        realUgi = UserGroupInformation.getCurrentUser();
+      } catch (IOException ioe) {
+        throw new TTransportException(ioe);
+      }
+    }
+    /**
+     * Create a server with a kerberos keytab/principal.
+     */
+    protected Server(String keytabFile, String principalConf)
+        throws TTransportException {
+      if (keytabFile == null || keytabFile.isEmpty()) {
+        throw new TTransportException("No keytab specified");
+      }
+      if (principalConf == null || principalConf.isEmpty()) {
+        throw new TTransportException("No principal specified");
+      }
+
+      // Login from the keytab
+      String kerberosName;
+      try {
+        kerberosName =
+            SecurityUtil.getServerPrincipal(principalConf, "0.0.0.0");
+        UserGroupInformation.loginUserFromKeytab(
+            kerberosName, keytabFile);
+        realUgi = UserGroupInformation.getLoginUser();
+        assert realUgi.isFromKeytab();
+      } catch (IOException ioe) {
+        throw new TTransportException(ioe);
+      }
+    }
+
+    public void setSecretManager(DelegationTokenSecretManager secretManager) {
+      this.secretManager = secretManager;
+    }
+
+    /**
+     * Create a TTransportFactory that, upon connection of a client socket,
+     * negotiates a Kerberized SASL transport. The resulting TTransportFactory
+     * can be passed as both the input and output transport factory when
+     * instantiating a TThreadPoolServer, for example.
+     *
+     * @param saslProps Map of SASL properties
+     */
+
+    public TTransportFactory createTransportFactory(Map<String, String> saslProps)
+        throws TTransportException {
+
+      TSaslServerTransport.Factory transFactory = createSaslServerTransportFactory(saslProps);
+
+      return new TUGIAssumingTransportFactory(transFactory, realUgi);
+    }
+
+    /**
+     * Create a TSaslServerTransport.Factory that, upon connection of a client
+     * socket, negotiates a Kerberized SASL transport.
+     *
+     * @param saslProps Map of SASL properties
+     */
+    public TSaslServerTransport.Factory createSaslServerTransportFactory(
+        Map<String, String> saslProps) throws TTransportException {
+      // Parse out the kerberos principal, host, realm.
+      String kerberosName = realUgi.getUserName();
+      final String names[] = SaslRpcServer.splitKerberosName(kerberosName);
+      if (names.length != 3) {
+        throw new TTransportException("Kerberos principal should have 3 parts: " + kerberosName);
+      }
+
+      TSaslServerTransport.Factory transFactory = new TSaslServerTransport.Factory();
+      transFactory.addServerDefinition(
+          AuthMethod.KERBEROS.getMechanismName(),
+          names[0], names[1],  // two parts of kerberos principal
+          saslProps,
+          new SaslRpcServer.SaslGssCallbackHandler());
+      transFactory.addServerDefinition(AuthMethod.DIGEST.getMechanismName(),
+          null, SaslRpcServer.SASL_DEFAULT_REALM,
+          saslProps, new SaslDigestCallbackHandler(secretManager));
+
+      return transFactory;
+    }
+
+    /**
+     * Wrap a TTransportFactory in such a way that, before processing any RPC, it
+     * assumes the UserGroupInformation of the user authenticated by
+     * the SASL transport.
+     */
+    public TTransportFactory wrapTransportFactory(TTransportFactory transFactory) {
+      return new TUGIAssumingTransportFactory(transFactory, realUgi);
+    }
+
+    /**
+     * Wrap a TProcessor in such a way that, before processing any RPC, it
+     * assumes the UserGroupInformation of the user authenticated by
+     * the SASL transport.
+     */
+
+    public TProcessor wrapProcessor(TProcessor processor) {
+      return new TUGIAssumingProcessor(processor, secretManager, true);
+    }
+
+    /**
+     * Wrap a TProcessor to capture the client information like connecting userid, ip etc
+     */
+
+    public TProcessor wrapNonAssumingProcessor(TProcessor processor) {
+      return new TUGIAssumingProcessor(processor, secretManager, false);
+    }
+
+    final static ThreadLocal<InetAddress> remoteAddress =
+        new ThreadLocal<InetAddress>() {
+
+      @Override
+      protected InetAddress initialValue() {
+        return null;
+      }
+    };
+
+    public InetAddress getRemoteAddress() {
+      return remoteAddress.get();
+    }
+
+    final static ThreadLocal<AuthenticationMethod> authenticationMethod =
+        new ThreadLocal<AuthenticationMethod>() {
+
+      @Override
+      protected AuthenticationMethod initialValue() {
+        return AuthenticationMethod.TOKEN;
+      }
+    };
+
+    private static ThreadLocal<String> remoteUser = new ThreadLocal<String> () {
+
+      @Override
+      protected String initialValue() {
+        return null;
+      }
+    };
+
+
+    public String getRemoteUser() {
+      return remoteUser.get();
+    }
+
+    private final static ThreadLocal<String> userAuthMechanism =
+        new ThreadLocal<String>() {
+
+      @Override
+      protected String initialValue() {
+        return AuthMethod.KERBEROS.getMechanismName();
+      }
+    };
+
+    public String getUserAuthMechanism() {
+      return userAuthMechanism.get();
+    }
+    /** CallbackHandler for SASL DIGEST-MD5 mechanism */
+    // This code is pretty much completely based on Hadoop's
+    // SaslRpcServer.SaslDigestCallbackHandler - the only reason we could not
+    // use that Hadoop class as-is was because it needs a Server.Connection object
+    // which is relevant in hadoop rpc but not here in the metastore - so the
+    // code below does not deal with the Server.Connection object.
+    static class SaslDigestCallbackHandler implements CallbackHandler {
+      private final DelegationTokenSecretManager secretManager;
+
+      public SaslDigestCallbackHandler(
+          DelegationTokenSecretManager secretManager) {
+        this.secretManager = secretManager;
+      }
+
+      private char[] getPassword(DelegationTokenIdentifier tokenid) throws InvalidToken {
+        return encodePassword(secretManager.retrievePassword(tokenid));
+      }
+
+      private char[] encodePassword(byte[] password) {
+        return new String(Base64.encodeBase64(password)).toCharArray();
+      }
+
+      /** {@inheritDoc} */
+      @Override
+      public void handle(Callback[] callbacks) throws InvalidToken,
+      UnsupportedCallbackException {
+        NameCallback nc = null;
+        PasswordCallback pc = null;
+        AuthorizeCallback ac = null;
+        for (Callback callback : callbacks) {
+          if (callback instanceof AuthorizeCallback) {
+            ac = (AuthorizeCallback) callback;
+          } else if (callback instanceof NameCallback) {
+            nc = (NameCallback) callback;
+          } else if (callback instanceof PasswordCallback) {
+            pc = (PasswordCallback) callback;
+          } else if (callback instanceof RealmCallback) {
+            continue; // realm is ignored
+          } else {
+            throw new UnsupportedCallbackException(callback,
+                "Unrecognized SASL DIGEST-MD5 Callback");
+          }
+        }
+        if (pc != null) {
+          DelegationTokenIdentifier tokenIdentifier = SaslRpcServer.
+              getIdentifier(nc.getDefaultName(), secretManager);
+          char[] password = getPassword(tokenIdentifier);
+
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("SASL server DIGEST-MD5 callback: setting password "
+                + "for client: " + tokenIdentifier.getUser());
+          }
+          pc.setPassword(password);
+        }
+        if (ac != null) {
+          String authid = ac.getAuthenticationID();
+          String authzid = ac.getAuthorizationID();
+          ac.setAuthorized(authid.equals(authzid));
+          if (ac.isAuthorized()) {
+            if (LOG.isDebugEnabled()) {
+              String username =
+                  SaslRpcServer.getIdentifier(authzid, secretManager).getUser().getUserName();
+              LOG.debug("SASL server DIGEST-MD5 callback: setting "
+                  + "canonicalized client ID: " + username);
+            }
+            ac.setAuthorizedID(authzid);
+          }
+        }
+      }
+    }
+
+    /**
+     * Processor that pulls the SaslServer object out of the transport, and
+     * assumes the remote user's UGI before calling through to the original
+     * processor.
+     *
+     * This is used on the server side to set the UGI for each specific call.
+     */
+    protected class TUGIAssumingProcessor implements TProcessor {
+      final TProcessor wrapped;
+      DelegationTokenSecretManager secretManager;
+      boolean useProxy;
+      TUGIAssumingProcessor(TProcessor wrapped, DelegationTokenSecretManager secretManager,
+          boolean useProxy) {
+        this.wrapped = wrapped;
+        this.secretManager = secretManager;
+        this.useProxy = useProxy;
+      }
+
+      @Override
+      public boolean process(final TProtocol inProt, final TProtocol outProt) throws TException {
+        TTransport trans = inProt.getTransport();
+        if (!(trans instanceof TSaslServerTransport)) {
+          throw new TException("Unexpected non-SASL transport " + trans.getClass());
+        }
+        TSaslServerTransport saslTrans = (TSaslServerTransport)trans;
+        SaslServer saslServer = saslTrans.getSaslServer();
+        String authId = saslServer.getAuthorizationID();
+        LOG.debug("AUTH ID ======>" + authId);
+        String endUser = authId;
+
+        Socket socket = ((TSocket)(saslTrans.getUnderlyingTransport())).getSocket();
+        remoteAddress.set(socket.getInetAddress());
+
+        String mechanismName = saslServer.getMechanismName();
+        userAuthMechanism.set(mechanismName);
+        if (AuthMethod.PLAIN.getMechanismName().equalsIgnoreCase(mechanismName)) {
+          remoteUser.set(endUser);
+          return wrapped.process(inProt, outProt);
+        }
+
+        authenticationMethod.set(AuthenticationMethod.KERBEROS);
+        if (AuthMethod.TOKEN.getMechanismName().equalsIgnoreCase(mechanismName)) {
+          try {
+            TokenIdentifier tokenId = SaslRpcServer.getIdentifier(authId,
+                secretManager);
+            endUser = tokenId.getUser().getUserName();
+            authenticationMethod.set(AuthenticationMethod.TOKEN);
+          } catch (InvalidToken e) {
+            // Keep the cause so clients can see why token validation failed.
+            throw new TException(e.getMessage(), e);
+          }
+        }
+
+        UserGroupInformation clientUgi = null;
+        try {
+          if (useProxy) {
+            clientUgi = UserGroupInformation.createProxyUser(
+                endUser, UserGroupInformation.getLoginUser());
+            remoteUser.set(clientUgi.getShortUserName());
+            LOG.debug("Set remoteUser :" + remoteUser.get());
+            return clientUgi.doAs(new PrivilegedExceptionAction<Boolean>() {
+
+              @Override
+              public Boolean run() {
+                try {
+                  return wrapped.process(inProt, outProt);
+                } catch (TException te) {
+                  throw new RuntimeException(te);
+                }
+              }
+            });
+          } else {
+            // use the short user name for the request
+            UserGroupInformation endUserUgi = UserGroupInformation.createRemoteUser(endUser);
+            remoteUser.set(endUserUgi.getShortUserName());
+            LOG.debug("Set remoteUser :" + remoteUser.get() + ", from endUser :" + endUser);
+            return wrapped.process(inProt, outProt);
+          }
+        } catch (RuntimeException rte) {
+          if (rte.getCause() instanceof TException) {
+            throw (TException)rte.getCause();
+          }
+          throw rte;
+        } catch (InterruptedException ie) {
+          throw new RuntimeException(ie); // unexpected!
+        } catch (IOException ioe) {
+          throw new RuntimeException(ioe); // unexpected!
+        } finally {
+          if (clientUgi != null) {
+            try { FileSystem.closeAllForUGI(clientUgi); }
+            catch(IOException exception) {
+              LOG.error("Could not clean up file-system handles for UGI: " + clientUgi, exception);
+            }
+          }
+        }
+      }
+    }
+
+    /**
+     * A TransportFactory that wraps another one, but assumes a specified UGI
+     * before calling through.
+     *
+     * This is used on the server side to assume the server's Principal when accepting
+     * clients.
+     */
+    static class TUGIAssumingTransportFactory extends TTransportFactory {
+      private final UserGroupInformation ugi;
+      private final TTransportFactory wrapped;
+
+      public TUGIAssumingTransportFactory(TTransportFactory wrapped, UserGroupInformation ugi) {
+        assert wrapped != null;
+        assert ugi != null;
+        this.wrapped = wrapped;
+        this.ugi = ugi;
+      }
+
+      @Override
+      public TTransport getTransport(final TTransport trans) {
+        return ugi.doAs(new PrivilegedAction<TTransport>() {
+          @Override
+          public TTransport run() {
+            return wrapped.getTransport(trans);
+          }
+        });
+      }
+    }
+  }
+}
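
A minimal, hypothetical sketch of how a Thrift server could combine the methods
shown above. Only createTransportFactory, wrapTransportFactory and
wrapProcessor appear in this patch; the helper class, its name, and the
assumption that createTransportFactory takes the SASL properties map and may
throw TTransportException are illustrative, not part of the change:

  import java.util.Map;

  import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
  import org.apache.thrift.TProcessor;
  import org.apache.thrift.transport.TTransportException;
  import org.apache.thrift.transport.TTransportFactory;

  public class SaslServerWiring {
    /** Transport factory for a TServer: SASL definitions, accepted under the server's UGI. */
    public static TTransportFactory transports(HadoopThriftAuthBridge.Server saslServer,
        Map<String, String> saslProps) throws TTransportException {
      // Kerberos and DIGEST-MD5 server definitions, wrapped so that connections
      // are accepted while holding the server's own Kerberos UGI.
      return saslServer.wrapTransportFactory(saslServer.createTransportFactory(saslProps));
    }

    /** Processor for a TServer: each RPC runs as the SASL-authenticated client. */
    public static TProcessor processor(HadoopThriftAuthBridge.Server saslServer,
        TProcessor plain) {
      // TUGIAssumingProcessor with useProxy = true, per wrapProcessor above.
      return saslServer.wrapProcessor(plain);
    }
  }

Both results would then be handed to the Thrift TServer arguments together.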

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java
new file mode 100644
index 0000000..dc76535
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/HadoopThriftAuthBridge23.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.security;
+
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.SaslRpcServer;
+
+/**
+ * Functions that bridge Thrift's SASL transports to Hadoop's SASL callback
+ * handlers and authentication classes.
+ *
+ * This is a 0.23/2.x specific implementation
+ */
+public class HadoopThriftAuthBridge23 extends HadoopThriftAuthBridge {
+
+  private static Field SASL_PROPS_FIELD;
+  private static Class<?> SASL_PROPERTIES_RESOLVER_CLASS;
+  private static Method RES_GET_INSTANCE_METHOD;
+  private static Method GET_DEFAULT_PROP_METHOD;
+  static {
+    SASL_PROPERTIES_RESOLVER_CLASS = null;
+    SASL_PROPS_FIELD = null;
+    final String SASL_PROP_RES_CLASSNAME = "org.apache.hadoop.security.SaslPropertiesResolver";
+    try {
+      SASL_PROPERTIES_RESOLVER_CLASS = Class.forName(SASL_PROP_RES_CLASSNAME);
+    } catch (ClassNotFoundException e) {
+      // Class not present: this is a pre-2.4 Hadoop without SaslPropertiesResolver.
+    }
+
+    if (SASL_PROPERTIES_RESOLVER_CLASS != null) {
+      // found the class, so this would be hadoop version 2.4 or newer (See
+      // HADOOP-10221, HADOOP-10451)
+      try {
+        RES_GET_INSTANCE_METHOD = SASL_PROPERTIES_RESOLVER_CLASS.getMethod("getInstance",
+            Configuration.class);
+        GET_DEFAULT_PROP_METHOD = SASL_PROPERTIES_RESOLVER_CLASS.getMethod("getDefaultProperties");
+      } catch (Exception e) {
+        // this must be Hadoop 2.4, where getDefaultProperties was protected
+      }
+    }
+
+    if (SASL_PROPERTIES_RESOLVER_CLASS == null || GET_DEFAULT_PROP_METHOD == null) {
+      // This must be Hadoop 2.4 or earlier. Fall back to the earlier method of
+      // getting the properties, which uses the SASL_PROPS field.
+      try {
+        SASL_PROPS_FIELD = SaslRpcServer.class.getField("SASL_PROPS");
+      } catch (NoSuchFieldException e) {
+        // Older versions of Hadoop should have this field
+        throw new IllegalStateException("Error finding hadoop SASL_PROPS field in "
+            + SaslRpcServer.class.getSimpleName(), e);
+      }
+    }
+  }
+
+  // TODO RIVEN switch this back to package level when we can move TestHadoopAuthBridge23 into
+  // riven.
+  // Package permission so that HadoopThriftAuthBridge can construct it but others cannot.
+  protected HadoopThriftAuthBridge23() {
+  }
+
+  /**
+   * Read and return the Hadoop SASL configuration, which can be configured
+   * using "hadoop.rpc.protection".
+   *
+   * @param conf the Hadoop configuration to read the SASL settings from
+   * @return Hadoop SASL configuration
+   */
+  @SuppressWarnings("unchecked")
+  @Override
+  public Map<String, String> getHadoopSaslProperties(Configuration conf) {
+    if (SASL_PROPS_FIELD != null) {
+      // Hadoop 2.4 and earlier way of finding the SASL property settings.
+      // Initialize the SaslRpcServer to ensure QOP parameters are read from
+      // conf
+      SaslRpcServer.init(conf);
+      try {
+        return (Map<String, String>) SASL_PROPS_FIELD.get(null);
+      } catch (Exception e) {
+        throw new IllegalStateException("Error finding hadoop SASL properties", e);
+      }
+    }
+    // Hadoop 2.5 and later way of finding the SASL properties.
+    try {
+      Configurable saslPropertiesResolver = (Configurable) RES_GET_INSTANCE_METHOD.invoke(null,
+          conf);
+      saslPropertiesResolver.setConf(conf);
+      return (Map<String, String>) GET_DEFAULT_PROP_METHOD.invoke(saslPropertiesResolver);
+    } catch (Exception e) {
+      throw new IllegalStateException("Error finding hadoop SASL properties", e);
+    }
+  }
+
+}
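
A quick, hypothetical way to see what getHadoopSaslProperties resolves to; the
demo class name is made up, and it sits in the same package because the
HadoopThriftAuthBridge23 constructor is protected:

  package org.apache.hadoop.hive.metastore.security;

  import java.util.Map;

  import org.apache.hadoop.conf.Configuration;

  public class SaslPropsDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("hadoop.rpc.protection", "privacy");  // authentication | integrity | privacy
      // Same package, so the protected constructor is accessible.
      Map<String, String> props = new HadoopThriftAuthBridge23().getHadoopSaslProperties(conf);
      // On Hadoop 2.5+ this goes through SaslPropertiesResolver; "privacy"
      // typically maps to the SASL QOP value "auth-conf".
      System.out.println(props.get("javax.security.sasl.qop"));
    }
  }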

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java
new file mode 100644
index 0000000..c484cd3
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MemoryTokenStore.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge.Server.ServerMode;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Default in-memory token store implementation.
+ */
+public class MemoryTokenStore implements DelegationTokenStore {
+  private static final Logger LOG = LoggerFactory.getLogger(MemoryTokenStore.class);
+
+  private final Map<Integer, String> masterKeys = new ConcurrentHashMap<>();
+
+  private final ConcurrentHashMap<DelegationTokenIdentifier, DelegationTokenInformation> tokens
+      = new ConcurrentHashMap<>();
+
+  private final AtomicInteger masterKeySeq = new AtomicInteger();
+  private Configuration conf;
+
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+
+  @Override
+  public int addMasterKey(String s) {
+    int keySeq = masterKeySeq.getAndIncrement();
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("addMasterKey: s = " + s + ", keySeq = " + keySeq);
+    }
+    masterKeys.put(keySeq, s);
+    return keySeq;
+  }
+
+  @Override
+  public void updateMasterKey(int keySeq, String s) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("updateMasterKey: s = " + s + ", keySeq = " + keySeq);
+    }
+    masterKeys.put(keySeq, s);
+  }
+
+  @Override
+  public boolean removeMasterKey(int keySeq) {
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("removeMasterKey: keySeq = " + keySeq);
+    }
+    return masterKeys.remove(keySeq) != null;
+  }
+
+  @Override
+  public String[] getMasterKeys() {
+    return masterKeys.values().toArray(new String[0]);
+  }
+
+  @Override
+  public boolean addToken(DelegationTokenIdentifier tokenIdentifier,
+    DelegationTokenInformation token) {
+    DelegationTokenInformation tokenInfo = tokens.putIfAbsent(tokenIdentifier, token);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("addToken: tokenIdentifier = " + tokenIdentifier + ", added = " + (tokenInfo == null));
+    }
+    return (tokenInfo == null);
+  }
+
+  @Override
+  public boolean removeToken(DelegationTokenIdentifier tokenIdentifier) {
+    DelegationTokenInformation tokenInfo = tokens.remove(tokenIdentifier);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("removeToken: tokenIdentifier = " + tokenIdentifier + ", removed = " + (tokenInfo != null));
+    }
+    return tokenInfo != null;
+  }
+
+  @Override
+  public DelegationTokenInformation getToken(DelegationTokenIdentifier tokenIdentifier) {
+    DelegationTokenInformation result = tokens.get(tokenIdentifier);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("getToken: tokenIdentifier = " + tokenIdentifier + ", result = " + result);
+    }
+    return result;
+  }
+
+  @Override
+  public List<DelegationTokenIdentifier> getAllDelegationTokenIdentifiers() {
+    // ArrayList(Collection) copies the key set, giving a point-in-time snapshot.
+    return new ArrayList<>(tokens.keySet());
+  }
+
+  @Override
+  public void close() throws IOException {
+    //no-op
+  }
+
+  @Override
+  public void init(Object hmsHandler, ServerMode smode) throws TokenStoreException {
+    // no-op
+  }
+}
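
A hypothetical round-trip through MemoryTokenStore; the (owner, renewer,
realUser) constructor of DelegationTokenIdentifier is assumed from its Hadoop
base class, and the class name is made up:

  package org.apache.hadoop.hive.metastore.security;

  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager.DelegationTokenInformation;

  public class MemoryTokenStoreDemo {
    public static void main(String[] args) throws Exception {
      MemoryTokenStore store = new MemoryTokenStore();
      int keySeq = store.addMasterKey("opaque-key-material");

      // Identifier constructor assumed from AbstractDelegationTokenIdentifier.
      DelegationTokenIdentifier id =
          new DelegationTokenIdentifier(new Text("hive"), new Text("hive"), new Text("hive"));
      DelegationTokenInformation info =
          new DelegationTokenInformation(System.currentTimeMillis() + 3600_000L, new byte[0]);

      System.out.println(store.addToken(id, info));  // true: first insert
      System.out.println(store.addToken(id, info));  // false: putIfAbsent semantics
      System.out.println(store.getAllDelegationTokenIdentifiers().size());  // 1

      store.removeToken(id);
      store.removeMasterKey(keySeq);
      store.close();
    }
  }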

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java
new file mode 100644
index 0000000..2b0110f
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/MetastoreDelegationTokenManager.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.util.ReflectionUtils;
+
+public class MetastoreDelegationTokenManager {
+
+  protected DelegationTokenSecretManager secretManager;
+
+  public MetastoreDelegationTokenManager() {
+  }
+
+  public DelegationTokenSecretManager getSecretManager() {
+    return secretManager;
+  }
+
+  public void startDelegationTokenSecretManager(Configuration conf, Object hms, HadoopThriftAuthBridge.Server.ServerMode smode)
+      throws IOException {
+    long secretKeyInterval = MetastoreConf.getTimeVar(conf,
+        MetastoreConf.ConfVars.DELEGATION_KEY_UPDATE_INTERVAL, TimeUnit.MILLISECONDS);
+    long tokenMaxLifetime = MetastoreConf.getTimeVar(conf,
+        MetastoreConf.ConfVars.DELEGATION_TOKEN_MAX_LIFETIME, TimeUnit.MILLISECONDS);
+    long tokenRenewInterval = MetastoreConf.getTimeVar(conf,
+        MetastoreConf.ConfVars.DELEGATION_TOKEN_RENEW_INTERVAL, TimeUnit.MILLISECONDS);
+    long tokenGcInterval = MetastoreConf.getTimeVar(conf,
+        MetastoreConf.ConfVars.DELEGATION_TOKEN_GC_INTERVAL, TimeUnit.MILLISECONDS);
+
+    DelegationTokenStore dts = getTokenStore(conf);
+    dts.setConf(conf);
+    dts.init(hms, smode);
+    secretManager =
+        new TokenStoreDelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime,
+            tokenRenewInterval, tokenGcInterval, dts);
+    secretManager.startThreads();
+  }
+
+  public String getDelegationToken(final String owner, final String renewer, String remoteAddr)
+      throws IOException,
+      InterruptedException {
+    /*
+     * If the user asking for the token is the same as the 'owner', don't do
+     * any proxy authorization checks. For cases like Oozie, where it gets
+     * a delegation token for another user, we need to make sure Oozie is
+     * authorized to get a delegation token.
+     */
+    // Do all checks on short names
+    UserGroupInformation currUser = UserGroupInformation.getCurrentUser();
+    UserGroupInformation ownerUgi = UserGroupInformation.createRemoteUser(owner);
+    if (!ownerUgi.getShortUserName().equals(currUser.getShortUserName())) {
+      // In the case of proxy users, getCurrentUser will return the real user
+      // (e.g. Oozie) because of the doAs that happened just before the server
+      // started executing getDelegationToken in the metastore.
+      ownerUgi = UserGroupInformation.createProxyUser(owner, UserGroupInformation.getCurrentUser());
+      ProxyUsers.authorize(ownerUgi, remoteAddr, null);
+    }
+    return ownerUgi.doAs(new PrivilegedExceptionAction<String>() {
+
+      @Override
+      public String run() throws IOException {
+        return secretManager.getDelegationToken(renewer);
+      }
+    });
+  }
+
+  public String getDelegationTokenWithService(String owner, String renewer, String service, String remoteAddr)
+      throws IOException, InterruptedException {
+    String token = getDelegationToken(owner, renewer, remoteAddr);
+    return addServiceToToken(token, service);
+  }
+
+  public long renewDelegationToken(String tokenStrForm)
+      throws IOException {
+    return secretManager.renewDelegationToken(tokenStrForm);
+  }
+
+  public String getUserFromToken(String tokenStr) throws IOException {
+    return secretManager.getUserFromToken(tokenStr);
+  }
+
+  public void cancelDelegationToken(String tokenStrForm) throws IOException {
+    secretManager.cancelDelegationToken(tokenStrForm);
+  }
+
+  /**
+   * Verify a delegation token string.
+   * @param tokenStrForm encoded token string
+   * @return user name of the token owner
+   * @throws IOException if the token is invalid or cannot be decoded
+   */
+  public String verifyDelegationToken(String tokenStrForm) throws IOException {
+    return secretManager.verifyDelegationToken(tokenStrForm);
+  }
+
+  private DelegationTokenStore getTokenStore(Configuration conf) throws IOException {
+    String tokenStoreClassName =
+        MetastoreConf.getVar(conf, MetastoreConf.ConfVars.DELEGATION_TOKEN_STORE_CLS, "");
+    // The second half of this if is to catch cases where users are passing in a HiveConf for
+    // configuration.  It will have set the default value of
+    // "hive.cluster.delegation.token.store.class" to
+    // "org.apache.hadoop.hive.thrift.MemoryTokenStore" as part of its construction.  But this is
+    // the hive-shims version of the memory store.  We want to convert this to our default value.
+    if (StringUtils.isBlank(tokenStoreClassName) ||
+        "org.apache.hadoop.hive.thrift.MemoryTokenStore".equals(tokenStoreClassName)) {
+      return new MemoryTokenStore();
+    }
+    try {
+      Class<? extends DelegationTokenStore> storeClass =
+          Class.forName(tokenStoreClassName).asSubclass(DelegationTokenStore.class);
+      return ReflectionUtils.newInstance(storeClass, conf);
+    } catch (ClassNotFoundException e) {
+      throw new IOException("Error initializing delegation token store: " + tokenStoreClassName, e);
+    }
+  }
+
+  /**
+   * Add a given service to a delegation token string.
+   * @param tokenStr encoded token string
+   * @param tokenService service name to set on the token
+   * @return encoded token string with the service set
+   * @throws IOException if the token cannot be decoded
+   */
+  public static String addServiceToToken(String tokenStr, String tokenService)
+      throws IOException {
+    Token<DelegationTokenIdentifier> delegationToken = createToken(tokenStr, tokenService);
+    return delegationToken.encodeToUrlString();
+  }
+
+  /**
+   * Create a new token from the given encoded string, with the given service set.
+   * @param tokenStr encoded token string
+   * @param tokenService service name to set on the token
+   * @return decoded token with the service set
+   * @throws IOException if the token cannot be decoded
+   */
+  private static Token<DelegationTokenIdentifier> createToken(String tokenStr, String tokenService)
+      throws IOException {
+    Token<DelegationTokenIdentifier> delegationToken = new Token<>();
+    delegationToken.decodeFromUrlString(tokenStr);
+    delegationToken.setService(new Text(tokenService));
+    return delegationToken;
+  }
+
+}
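
A hypothetical end-to-end use of the manager; ServerMode.METASTORE and the
null handler (acceptable because MemoryTokenStore.init is a no-op) are
assumptions, and the class name is made up:

  package org.apache.hadoop.hive.metastore.security;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;

  public class TokenManagerDemo {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      MetastoreDelegationTokenManager mgr = new MetastoreDelegationTokenManager();
      mgr.startDelegationTokenSecretManager(conf, null,
          HadoopThriftAuthBridge.Server.ServerMode.METASTORE);

      // Requesting a token for ourselves skips the proxy-user authorization path.
      String me = UserGroupInformation.getCurrentUser().getShortUserName();
      String token = mgr.getDelegationToken(me, me, "127.0.0.1");

      System.out.println("owner: " + mgr.getUserFromToken(token));
      System.out.println("valid until: " + mgr.renewDelegationToken(token));
      mgr.cancelDelegationToken(token);
    }
  }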

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java
new file mode 100644
index 0000000..79d317a
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TFilterTransport.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+/**
+  * Transport that simply wraps another transport.
+  * This is the equivalent of FilterInputStream for Thrift transports.
+  */
+ public class TFilterTransport extends TTransport {
+   protected final TTransport wrapped;
+
+   public TFilterTransport(TTransport wrapped) {
+     this.wrapped = wrapped;
+   }
+
+   @Override
+   public void open() throws TTransportException {
+     wrapped.open();
+   }
+
+   @Override
+   public boolean isOpen() {
+     return wrapped.isOpen();
+   }
+
+   @Override
+   public boolean peek() {
+     return wrapped.peek();
+   }
+
+   @Override
+   public void close() {
+     wrapped.close();
+   }
+
+   @Override
+   public int read(byte[] buf, int off, int len) throws TTransportException {
+     return wrapped.read(buf, off, len);
+   }
+
+   @Override
+   public int readAll(byte[] buf, int off, int len) throws TTransportException {
+     return wrapped.readAll(buf, off, len);
+   }
+
+   @Override
+   public void write(byte[] buf) throws TTransportException {
+     wrapped.write(buf);
+   }
+
+   @Override
+   public void write(byte[] buf, int off, int len) throws TTransportException {
+     wrapped.write(buf, off, len);
+   }
+
+   @Override
+   public void flush() throws TTransportException {
+     wrapped.flush();
+   }
+
+   @Override
+   public byte[] getBuffer() {
+     return wrapped.getBuffer();
+   }
+
+   @Override
+   public int getBufferPosition() {
+     return wrapped.getBufferPosition();
+   }
+
+   @Override
+   public int getBytesRemainingInBuffer() {
+     return wrapped.getBytesRemainingInBuffer();
+   }
+
+   @Override
+   public void consumeBuffer(int len) {
+     wrapped.consumeBuffer(len);
+   }
+ }
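
TFilterTransport is meant to be subclassed; here is a hypothetical filter that
counts bytes crossing the wrapped transport (class name and behavior are
illustrative only):

  package org.apache.hadoop.hive.metastore.security;

  import java.util.concurrent.atomic.AtomicLong;

  import org.apache.thrift.transport.TTransport;
  import org.apache.thrift.transport.TTransportException;

  public class TCountingTransport extends TFilterTransport {
    private final AtomicLong bytesRead = new AtomicLong();
    private final AtomicLong bytesWritten = new AtomicLong();

    public TCountingTransport(TTransport wrapped) {
      super(wrapped);
    }

    @Override
    public int read(byte[] buf, int off, int len) throws TTransportException {
      int n = super.read(buf, off, len);
      if (n > 0) {
        bytesRead.addAndGet(n);
      }
      return n;
    }

    @Override
    public void write(byte[] buf) throws TTransportException {
      super.write(buf);
      bytesWritten.addAndGet(buf.length);
    }

    @Override
    public void write(byte[] buf, int off, int len) throws TTransportException {
      super.write(buf, off, len);
      bytesWritten.addAndGet(len);
    }

    public long getBytesRead() { return bytesRead.get(); }
    public long getBytesWritten() { return bytesWritten.get(); }
  }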

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java
new file mode 100644
index 0000000..38a946e
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIAssumingTransport.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportException;
+
+/**
+  * The Thrift SASL transports call Sasl.createSaslServer and Sasl.createSaslClient
+  * inside open(). So, we need to assume the correct UGI when the transport is opened
+  * so that the SASL mechanisms have access to the right principal. This transport
+  * wraps the Sasl transports to set up the right UGI context for open().
+  *
+  * This is used on the client side, where the API explicitly opens a transport to
+  * the server.
+  */
+ public class TUGIAssumingTransport extends TFilterTransport {
+   protected UserGroupInformation ugi;
+
+   public TUGIAssumingTransport(TTransport wrapped, UserGroupInformation ugi) {
+     super(wrapped);
+     this.ugi = ugi;
+   }
+
+   @Override
+   public void open() throws TTransportException {
+     try {
+       ugi.doAs(new PrivilegedExceptionAction<Void>() {
+         public Void run() {
+           try {
+             wrapped.open();
+           } catch (TTransportException tte) {
+             // Wrap the transport exception in an RTE, since UGI.doAs() then goes
+             // and unwraps this for us out of the doAs block. We then unwrap one
+             // more time in our catch clause to get back the TTE. (ugh)
+             throw new RuntimeException(tte);
+           }
+           return null;
+         }
+       });
+     } catch (IOException ioe) {
+       throw new RuntimeException("Received an ioe we never threw!", ioe);
+     } catch (InterruptedException ie) {
+       throw new RuntimeException("Received an ie we never threw!", ie);
+     } catch (RuntimeException rte) {
+       if (rte.getCause() instanceof TTransportException) {
+         throw (TTransportException)rte.getCause();
+       } else {
+         throw rte;
+       }
+     }
+   }
+ }
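
On the client side the wrapper goes around the transport whose open() performs
the SASL handshake. A hypothetical sketch, with a bare TSocket standing in for
the real Thrift SASL client transport and a made-up class name:

  package org.apache.hadoop.hive.metastore.security;

  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class ClientOpenDemo {
    public static TTransport openAsLoginUser(String host, int port) throws Exception {
      // In practice this would be the Thrift SASL client transport whose
      // open() calls Sasl.createSaslClient.
      TTransport inner = new TSocket(host, port);
      UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      TTransport transport = new TUGIAssumingTransport(inner, ugi);
      transport.open();  // runs inner.open() inside ugi.doAs()
      return transport;
    }
  }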

http://git-wip-us.apache.org/repos/asf/hive/blob/e2770d6e/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java
new file mode 100644
index 0000000..acfe949
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/security/TUGIContainingTransport.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore.security;
+
+import java.net.Socket;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.thrift.transport.TSocket;
+import org.apache.thrift.transport.TTransport;
+import org.apache.thrift.transport.TTransportFactory;
+
+import com.google.common.collect.MapMaker;
+
+/** TUGIContainingTransport associates UGI information with the connection (transport).
+ *  Wraps the underlying <code>TSocket</code> transport and annotates it with the UGI.
+ */
+
+public class TUGIContainingTransport extends TFilterTransport {
+
+  private UserGroupInformation ugi;
+
+  public TUGIContainingTransport(TTransport wrapped) {
+    super(wrapped);
+  }
+
+  public UserGroupInformation getClientUGI(){
+    return ugi;
+  }
+
+  public void setClientUGI(UserGroupInformation ugi){
+    this.ugi = ugi;
+  }
+
+  /**
+   * If the underlying TTransport is an instance of TSocket, it returns the Socket object
+   * which it contains.  Otherwise it returns null.
+   */
+  public Socket getSocket() {
+    if (wrapped instanceof TSocket) {
+      return (((TSocket)wrapped).getSocket());
+    }
+
+    return null;
+  }
+
+  /** Factory to create TUGIContainingTransport instances. */
+  public static class Factory extends TTransportFactory {
+
+    // We need a concurrent map with weak keys so that when the underlying
+    // transport goes out of scope it can still be GC'ed. Since the map's value
+    // holds a reference to its key, we need weakValues() as well.
+    private static final ConcurrentMap<TTransport, TUGIContainingTransport> transMap =
+        new MapMaker().weakKeys().weakValues().makeMap();
+
+    /**
+     * Get a new <code>TUGIContainingTransport</code> instance, or reuse the
+     * existing one if a <code>TUGIContainingTransport</code> has already been
+     * created before using the given <code>TTransport</code> as an underlying
+     * transport. This ensures that a given underlying transport instance
+     * receives the same <code>TUGIContainingTransport</code>.
+     */
+    @Override
+    public TUGIContainingTransport getTransport(TTransport trans) {
+
+      // UGI information is not available at connection setup time; it will be
+      // set later via the set_ugi() RPC.
+      TUGIContainingTransport tugiTrans = transMap.get(trans);
+      if (tugiTrans == null) {
+        tugiTrans = new TUGIContainingTransport(trans);
+        TUGIContainingTransport prev = transMap.putIfAbsent(trans, tugiTrans);
+        if (prev != null) {
+          return prev;
+        }
+      }
+      return tugiTrans;
+    }
+  }
+}
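
The factory's memoization can be seen in a hypothetical snippet (class name
and port are illustrative; 9083 is just the conventional metastore port):

  package org.apache.hadoop.hive.metastore.security;

  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;

  public class FactoryDemo {
    public static void main(String[] args) {
      TUGIContainingTransport.Factory factory = new TUGIContainingTransport.Factory();
      TTransport underlying = new TSocket("localhost", 9083);

      // The same underlying transport always maps to the same wrapper, so the
      // UGI attached later via the set_ugi() RPC stays with the connection.
      TUGIContainingTransport t1 = factory.getTransport(underlying);
      TUGIContainingTransport t2 = factory.getTransport(underlying);
      System.out.println(t1 == t2);  // true
    }
  }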