Posted to commits@impala.apache.org by kw...@apache.org on 2016/09/30 02:14:28 UTC

[11/61] [partial] incubator-impala git commit: IMPALA-3786: Replace "cloudera" with "apache" (part 1)

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java b/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
deleted file mode 100644
index 57574fe..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/MembershipSnapshot.java
+++ /dev/null
@@ -1,84 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicReference;
-
-import com.cloudera.impala.thrift.TNetworkAddress;
-import com.cloudera.impala.thrift.TUpdateMembershipRequest;
-import com.google.common.collect.Sets;
-
-/**
- * Singleton class that represents a snapshot of the Impalad cluster membership.  Host
- * membership is determined by both IP address and hostname (to mimic the backend's
- * SimpleScheduler).  A new snapshot is created whenever the cluster membership changes
- * so that clients don't need to hold a lock while examining a snapshot.
- */
-public class MembershipSnapshot {
-
-  // The latest instance of the MembershipSnapshot.
-  private static AtomicReference<MembershipSnapshot> cluster_ =
-      new AtomicReference<MembershipSnapshot>(new MembershipSnapshot());
-
-  // The set of hosts that are members of the cluster given by hostname.
-  private final Set<String> hostnames_;
-
-  // The set of hosts that are members of the cluster given by IP address.
-  private final Set<String> ipAddresses_;
-
-  // The number of nodes in the cluster.  Normally, this will be equal to
-  // hostnames_.size(), except in the test minicluster where there are multiple
-  // impalads running on a single host.
-  private final int numNodes_;
-
-  // Used only to construct the initial MembershipSnapshot.  Before we get the first
-  // snapshot, assume one node (the localhost) to mimic SimpleScheduler.
-  private MembershipSnapshot() {
-    hostnames_ = Sets.newHashSet();
-    ipAddresses_ = Sets.newHashSet();
-    numNodes_ = 1;
-  }
-
-  // Construct a new snapshot based on the TUpdateMembershipRequest.
-  private MembershipSnapshot(TUpdateMembershipRequest request) {
-    hostnames_ = request.getHostnames();
-    ipAddresses_ = request.getIp_addresses();
-    numNodes_ = request.getNum_nodes();
-  }
-
-  // Determine whether a host, given either by IP address or hostname, is a member of
-  // this snapshot.  Returns true if it is, false otherwise.
-  public boolean contains(TNetworkAddress address) {
-    String host = address.getHostname();
-    return ipAddresses_.contains(host) || hostnames_.contains(host);
-  }
-
-  // The number of nodes in this snapshot.
-  public int numNodes() { return numNodes_; }
-
-  // Atomically update the singleton snapshot instance.  After the update completes,
-  // all calls to getCluster() will return the new snapshot.
-  public static void update(TUpdateMembershipRequest request) {
-    cluster_.set(new MembershipSnapshot(request));
-  }
-
-  // Return the current singleton snapshot instance.
-  public static MembershipSnapshot getCluster() { return cluster_.get(); }
-
-}
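
A minimal sketch, assuming nothing beyond the JDK, of the lock-free snapshot idiom the class above describes: readers take an immutable snapshot from an AtomicReference without locking, while updates atomically publish a freshly built instance. The class and field names below are illustrative, not part of the Impala sources.

  import java.util.Set;
  import java.util.concurrent.atomic.AtomicReference;

  // Hypothetical stand-in for MembershipSnapshot: an immutable value published
  // through an AtomicReference so readers never need to hold a lock.
  final class SnapshotHolderSketch {
    private static final AtomicReference<Set<String>> hosts_ =
        new AtomicReference<>(Set.of());

    // Readers: a single volatile read; the returned set is never mutated.
    static Set<String> current() { return hosts_.get(); }

    // Writers: build a new immutable set and swap it in atomically, as
    // MembershipSnapshot.update() does with a whole new snapshot instance.
    static void update(Set<String> newHosts) { hosts_.set(Set.copyOf(newHosts)); }
  }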

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java b/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
deleted file mode 100644
index 540c749..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/MetaStoreUtil.java
+++ /dev/null
@@ -1,172 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.log4j.Logger;
-import org.apache.thrift.TException;
-
-import com.cloudera.impala.catalog.HdfsTable;
-import com.cloudera.impala.common.AnalysisException;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- * Utility methods for interacting with the Hive Metastore.
- */
-public class MetaStoreUtil {
-  private static final Logger LOG = Logger.getLogger(MetaStoreUtil.class);
-
-  // Maximum comment length, e.g., for columns, that can be stored in the HMS.
-  // This number is a lower bound of the constraint set in the HMS DB schema,
-  // because the constraint varies among different backing databases, e.g.,
-  // for Postgres it is 4000, but for most other databases it is 256.
-  public static final int CREATE_MAX_COMMENT_LENGTH = 256;
-
-  // Maximum length of the string representation of a type that the HMS can store.
-  public static final int MAX_TYPE_NAME_LENGTH = 4000;
-
-  // The longest strings Hive accepts for [serde] property keys.
-  public static final int MAX_PROPERTY_KEY_LENGTH = 256;
-
-  // The longest strings Hive accepts for [serde] property values.
-  public static final int MAX_PROPERTY_VALUE_LENGTH = 4000;
-
-  // The default maximum number of partitions to fetch from the Hive metastore in one
-  // RPC.
-  private static final short DEFAULT_MAX_PARTITIONS_PER_RPC = 1000;
-
-  // The maximum number of partitions to fetch from the metastore in one RPC.
-  // Read from the 'hive.metastore.batch.retrieve.table.partition.max' Hive configuration
-  // and defaults to DEFAULT_MAX_PARTITIONS_PER_RPC if the value is not present in the
-  // Hive configuration.
-  private static short maxPartitionsPerRpc_ = DEFAULT_MAX_PARTITIONS_PER_RPC;
-
-  static {
-    // Get the value from the Hive configuration, if present.
-    HiveConf hiveConf = new HiveConf(HdfsTable.class);
-    String strValue = hiveConf.get(
-        HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString());
-    if (strValue != null) {
-      try {
-        maxPartitionsPerRpc_ = Short.parseShort(strValue);
-      } catch (NumberFormatException e) {
-        LOG.error("Error parsing max partition batch size from HiveConfig: ", e);
-      }
-    }
-    if (maxPartitionsPerRpc_ <= 0) {
-      LOG.error(String.format("Invalid value for max partition batch size: %d. Using " +
-          "default: %d", maxPartitionsPerRpc_, DEFAULT_MAX_PARTITIONS_PER_RPC));
-      maxPartitionsPerRpc_ = DEFAULT_MAX_PARTITIONS_PER_RPC;
-    }
-  }
-
-  /**
-   * Fetches all partitions for a table in batches, with each batch containing at most
-   * 'maxPartitionsPerRpc_' partitions. Returns a List containing all fetched Partitions.
-   * Will throw a MetaException if existing partitions are dropped while a fetch is in
-   * progress. To help protect against this, the operation can be retried if there is
-   * a MetaException by setting the "numRetries" parameter.
-   * Failures due to thrift exceptions (TExceptions) are not retried because they
-   * generally mean the connection is broken or has timed out. The HiveClient supports
-   * configuring retries at the connection level so it can be enabled independently.
-   */
-  public static List<org.apache.hadoop.hive.metastore.api.Partition> fetchAllPartitions(
-      IMetaStoreClient client, String dbName, String tblName, int numRetries)
-      throws MetaException, TException {
-    Preconditions.checkArgument(numRetries >= 0);
-    int retryAttempt = 0;
-    while (true) {
-      try {
-        // First, get all partition names that currently exist.
-        List<String> partNames = client.listPartitionNames(dbName, tblName, (short) -1);
-        return MetaStoreUtil.fetchPartitionsByName(client, partNames, dbName, tblName);
-      } catch (MetaException e) {
-        // Only retry for MetaExceptions, since TExceptions could indicate a broken
-        // connection which we can't recover from by retrying.
-        if (retryAttempt < numRetries) {
-          LOG.error(String.format("Error fetching partitions for table: %s.%s. " +
-              "Retry attempt: %d/%d", dbName, tblName, retryAttempt, numRetries), e);
-          ++retryAttempt;
-          // TODO: Sleep for a bit?
-        } else {
-          throw e;
-        }
-      }
-    }
-  }
-
-  /**
-   * Given a List of partition names, fetches the matching Partitions from the HMS
-   * in batches. Each batch will contain at most 'maxPartitionsPerRpc_' partitions.
-   * Returns a List containing all fetched Partitions.
-   * Will throw a MetaException if any partitions in 'partNames' do not exist.
-   */
-  public static List<Partition> fetchPartitionsByName(
-      IMetaStoreClient client, List<String> partNames, String dbName, String tblName)
-      throws MetaException, TException {
-    LOG.trace(String.format("Fetching %d partitions for: %s.%s using partition " +
-        "batch size: %d", partNames.size(), dbName, tblName, maxPartitionsPerRpc_));
-
-    List<org.apache.hadoop.hive.metastore.api.Partition> fetchedPartitions =
-        Lists.newArrayList();
-    // Fetch the partitions in batches.
-    for (int i = 0; i < partNames.size(); i += maxPartitionsPerRpc_) {
-      // Get a subset of partition names to fetch.
-      List<String> partsToFetch =
-          partNames.subList(i, Math.min(i + maxPartitionsPerRpc_, partNames.size()));
-      // Fetch these partitions from the metastore.
-      fetchedPartitions.addAll(
-          client.getPartitionsByNames(dbName, tblName, partsToFetch));
-    }
-    return fetchedPartitions;
-  }
-
-  /**
-   * Checks that a given 'property' is short enough for HMS to handle. If not, throws an
-   * 'AnalysisException' with 'name' as its prefix.
-   */
-  public static void checkShortProperty(String name, String property, int length)
-      throws AnalysisException {
-    if (property.length() > length) {
-      throw new AnalysisException(
-          name + " length must be <= " + length + ": " + property.length());
-    }
-  }
-
-  /**
-   * Checks that each key and value in a property map is short enough for HMS to handle.
-   * If not, an 'AnalysisException' is thrown with 'mapName' as its prefix.
-   */
-  public static void checkShortPropertyMap(
-      String mapName, Map<String, String> propertyMap) throws AnalysisException {
-    if (null != propertyMap) {
-      for (Map.Entry<String, String> property : propertyMap.entrySet()) {
-        checkShortProperty(mapName + " key", property.getKey(), MAX_PROPERTY_KEY_LENGTH);
-        checkShortProperty(
-            mapName + " value", property.getValue(), MAX_PROPERTY_VALUE_LENGTH);
-      }
-    }
-  }
-}
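
A hedged sketch of the batching pattern fetchAllPartitions()/fetchPartitionsByName() rely on, with a plain Function standing in for the real IMetaStoreClient RPC; the helper below is made up for illustration only.

  import java.util.ArrayList;
  import java.util.List;
  import java.util.function.Function;

  // Illustrative only: walk a list of partition names in fixed-size slices so
  // a single RPC never carries more than 'batchSize' items, mirroring the loop
  // in fetchPartitionsByName() above.
  final class BatchFetchSketch {
    static <T> List<T> fetchInBatches(
        List<String> names, int batchSize, Function<List<String>, List<T>> rpc) {
      List<T> results = new ArrayList<>();
      for (int i = 0; i < names.size(); i += batchSize) {
        List<String> slice = names.subList(i, Math.min(i + batchSize, names.size()));
        results.addAll(rpc.apply(slice));  // one metastore RPC per slice
      }
      return results;
    }
  }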

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java b/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
deleted file mode 100644
index 08c929b..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/NativeLibUtil.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.File;
-
-public class NativeLibUtil {
-  /**
-   * Attempts to load the given library from all paths in java.library.path.
-   * Throws a RuntimeException if the library could not be loaded from
-   * any location.
-   */
-  public static void loadLibrary(String libFileName) {
-    boolean found = false;
-    String javaLibPath = System.getProperty("java.library.path");
-    for (String path: javaLibPath.split(":")) {
-      File libFile = new File(path + File.separator + libFileName);
-      if (libFile.exists()) {
-        System.load(libFile.getPath());
-        found = true;
-        break;
-      }
-    }
-    if (!found) {
-      throw new RuntimeException("Failed to load " + libFileName + " from any " +
-          "location in java.library.path (" + javaLibPath + ").");
-    }
-  }
-}
\ No newline at end of file
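
A small self-contained sketch, assuming a hypothetical library name, of the search loadLibrary() performs over java.library.path; File.pathSeparator is used here for portability where the deleted code splits on ':'.

  import java.io.File;

  public class FindNativeLibSketch {
    public static void main(String[] args) {
      String libFileName = "libfesupport.so";  // hypothetical library name
      String javaLibPath = System.getProperty("java.library.path");
      for (String path : javaLibPath.split(File.pathSeparator)) {
        File candidate = new File(path, libFileName);
        // loadLibrary() would call System.load() on the first existing candidate
        // and throw a RuntimeException if no entry contains the file.
        System.out.println(candidate.getPath() + (candidate.exists() ? "  (found)" : ""));
      }
    }
  }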

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java b/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
deleted file mode 100644
index 9956ca4..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/NativeLogger.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Class that manages loading and calling the native logging library to forward
- * log4j log messages to be logged by glog.
- */
-public class NativeLogger {
-  private final static Logger LOG = LoggerFactory.getLogger(NativeLogger.class);
-  private static boolean loaded_ = false;
-
-  // Writes a log message to glog
-  private native static void Log(int severity, String msg, String filename, int line);
-
-  public static void LogToGlog(int severity, String msg, String filename, int line) {
-    try {
-      Log(severity, msg, filename, line);
-    } catch (UnsatisfiedLinkError e) {
-      loadLibrary();
-      Log(severity, msg, filename, line);
-    }
-  }
-
-  /**
-   * Loads the native logging support library.
-   */
-  private static synchronized void loadLibrary() {
-    if (loaded_) return;
-    NativeLibUtil.loadLibrary("libloggingsupport.so");
-    loaded_ = true;
-  }
-}
\ No newline at end of file
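
A hedged sketch of the load-on-demand pattern LogToGlog() uses: attempt the native call, and on UnsatisfiedLinkError load the library once and retry. The Runnable below stands in for the real native Log() method and is purely illustrative.

  final class LazyNativeCallSketch {
    private static boolean loaded_ = false;

    static void callNative(Runnable nativeCall) {
      try {
        nativeCall.run();
      } catch (UnsatisfiedLinkError e) {
        loadLibraryOnce();
        nativeCall.run();  // retry once now that the library is loaded
      }
    }

    private static synchronized void loadLibraryOnce() {
      if (loaded_) return;
      // In the deleted code: NativeLibUtil.loadLibrary("libloggingsupport.so");
      loaded_ = true;
    }
  }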

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java b/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
deleted file mode 100644
index bc85d18..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/PatternMatcher.java
+++ /dev/null
@@ -1,108 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import com.google.common.collect.Lists;
-
-/**
- * Utility class to handle pattern matching for different types of patterns
- * (e.g. Hive SHOW patterns, JDBC patterns). It maps those patterns onto Java
- * regex Pattern objects.
- */
-public class PatternMatcher {
-  // Patterns to match against. A string is considered to match if it matches
-  // any of the patterns.
-  private List<Pattern> patterns_;
-
-  // Returns true if patterns_ is null or the candidate matches any pattern.
-  // Returns false if patterns_ is empty or the candidate matches none of them.
-  public boolean matches(String candidate) {
-    if (patterns_ == null) return true;
-    if (patterns_.isEmpty()) return false;
-    for (Pattern pattern: patterns_) {
-      if (pattern.matcher(candidate).matches()) return true;
-    }
-    return false;
-  }
-
-  // Immutable pattern matcher that matches all
-  private final static class MatchAllPatternMatcher extends PatternMatcher {
-    MatchAllPatternMatcher() {}
-    public boolean matches(String candidate) { return true; }
-  }
-
-  // Immutable pattern matcher that matches none
-  private final static class MatchNonePatternMatcher extends PatternMatcher {
-    MatchNonePatternMatcher() {}
-    public boolean matches(String candidate) { return false; }
-  }
-
-  public static final PatternMatcher MATCHER_MATCH_ALL = new MatchAllPatternMatcher();
-  public static final PatternMatcher MATCHER_MATCH_NONE = new MatchNonePatternMatcher();
-
-  /**
-   * Creates a pattern matcher for hive patterns.
-   * The only metacharacters are '*' which matches any string of characters, and '|'
-   * which denotes choice.
-   * If hivePattern is null, all strings are considered to match. If it is the
-   * empty string, no strings match.
-   */
-  public static PatternMatcher createHivePatternMatcher(String hivePattern) {
-    if (hivePattern == null) return MATCHER_MATCH_ALL;
-    if (hivePattern.isEmpty()) return MATCHER_MATCH_NONE;
-    PatternMatcher result = new PatternMatcher();
-    result.patterns_ = Lists.newArrayList();
-    // Hive ignores pretty much all metacharacters, so we have to escape them.
-    final String metaCharacters = "+?.^()]\\/{}";
-    final Pattern regex = Pattern.compile("([" + Pattern.quote(metaCharacters) + "])");
-
-    for (String pattern: Arrays.asList(hivePattern.split("\\|"))) {
-      Matcher matcher = regex.matcher(pattern);
-      pattern = matcher.replaceAll("\\\\$1").replace("*", ".*");
-      result.patterns_.add(Pattern.compile(pattern));
-    }
-    return result;
-  }
-
-  /**
-   * Creates a matcher object for JDBC match strings.
-   */
-  public static PatternMatcher createJdbcPatternMatcher(String pattern) {
-    if (pattern == null || pattern.isEmpty()) {
-      return MATCHER_MATCH_ALL;
-    }
-    String wildcardPattern = ".*";
-    String result = pattern
-        .replaceAll("([^\\\\])%", "$1" + wildcardPattern)
-        .replaceAll("\\\\%", "%")
-        .replaceAll("^%", wildcardPattern)
-        .replaceAll("([^\\\\])_", "$1.")
-        .replaceAll("\\\\_", "_")
-        .replaceAll("^_", ".");
-    PatternMatcher matcher = new PatternMatcher();
-    matcher.patterns_ = Lists.newArrayList();
-    matcher.patterns_.add(Pattern.compile(result));
-    return matcher;
-  }
-}
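
A short usage sketch for the Hive-style matcher above, assuming the deleted class is on the classpath and using made-up table names: '*' matches any string of characters and '|' separates alternatives.

  public class HivePatternSketch {
    public static void main(String[] args) {
      PatternMatcher m = PatternMatcher.createHivePatternMatcher("functional*|tpch*");
      System.out.println(m.matches("functional_parquet"));  // true: matches "functional*"
      System.out.println(m.matches("tpcds"));               // false: matches neither alternative
      // A null pattern matches everything; an empty pattern matches nothing.
    }
  }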

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java b/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
deleted file mode 100644
index c1c9bd7..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/RequestPoolService.java
+++ /dev/null
@@ -1,451 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.io.File;
-import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.QueueACL;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.AllocationFileLoaderService;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairSchedulerConfiguration;
-import org.apache.thrift.TException;
-import org.apache.thrift.TSerializer;
-import org.apache.thrift.protocol.TBinaryProtocol;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.common.ByteUnits;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.common.JniUtil;
-import com.cloudera.impala.thrift.TErrorCode;
-import com.cloudera.impala.thrift.TPoolConfigParams;
-import com.cloudera.impala.thrift.TPoolConfig;
-import com.cloudera.impala.thrift.TResolveRequestPoolParams;
-import com.cloudera.impala.thrift.TResolveRequestPoolResult;
-import com.cloudera.impala.thrift.TStatus;
-import com.cloudera.impala.util.FileWatchService.FileChangeListener;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-
-/**
- * Admission control utility class that provides user to request pool mapping, ACL
- * enforcement, and pool configuration values. Pools are configured via a fair scheduler
- * allocation file (fair-scheduler.xml) and Llama configuration (llama-site.xml). This
- * class wraps a number of Hadoop classes to provide the user to pool mapping,
- * authorization, and accessing memory resource limits, all of which are specified in
- * the fair scheduler allocation file. The other pool limits are specified in the
- * Llama configuration, and those properties are accessed via the standard
- * {@link Configuration} API.
- *
- * Both the allocation configuration and Llama configuration files are watched for
- * changes and reloaded when necessary. The allocation file is watched/loaded using the
- * Yarn {@link AllocationFileLoaderService} and the Llama configuration uses a subclass of
- * the {@link FileWatchService}. There are two different mechanisms because there is
- * different parsing/configuration code for the allocation file and the Llama
- * configuration (which is a regular Hadoop conf file so it can use the
- * {@link Configuration} class). start() and stop() will start/stop watching and reloading
- * both of these files.
- *
- * A single instance is created by the backend and lasts the duration of the process.
- */
-public class RequestPoolService {
-  final static Logger LOG = LoggerFactory.getLogger(RequestPoolService.class);
-
-  private final static TBinaryProtocol.Factory protocolFactory_ =
-      new TBinaryProtocol.Factory();
-  // Used to ensure start() has been called before any other methods can be used.
-  private final AtomicBoolean running_;
-
-  // Key for the default maximum number of running queries ("placed reservations")
-  // property. The per-pool key name is this key with the pool name appended, e.g.
-  // "{key}.{pool}". This is a llama-site.xml configuration.
-  final static String LLAMA_MAX_PLACED_RESERVATIONS_KEY =
-      "llama.am.throttling.maximum.placed.reservations";
-
-  // Default value for the maximum.placed.reservations property. Note that this value
-  // differs from the current Llama default of 10000.
-  final static int LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT = -1;
-
-  // Key for the default maximum number of queued requests ("queued reservations")
-  // property. The per-pool key name is this key with the pool name appended, e.g.
-  // "{key}.{pool}". This is a llama-site.xml configuration.
-  final static String LLAMA_MAX_QUEUED_RESERVATIONS_KEY =
-      "llama.am.throttling.maximum.queued.reservations";
-
-  // Default value for the maximum.queued.reservations property. Note that this value
-  // differs from the current Llama default of 0 which disables queuing.
-  final static int LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT = 200;
-
-  // Key for the pool queue timeout (milliseconds). This is specified in the
-  // llama-site.xml but is Impala-specific and Llama does not use this.
-  final static String QUEUE_TIMEOUT_KEY = "impala.admission-control.pool-queue-timeout-ms";
-
-  // Default value of the pool queue timeout (ms).
-  final static int QUEUE_TIMEOUT_MS_DEFAULT = 60 * 1000;
-
-  // Key for the pool default query options. Query options are specified as a
-  // comma delimited string of 'key=value' pairs, e.g. 'key1=val1,key2=val2'.
-  // This is specified in the llama-site.xml but is Impala-specific and Llama does not
-  // use this.
-  final static String QUERY_OPTIONS_KEY = "impala.admission-control.pool-default-query-options";
-
-  // String format for a per-pool configuration key. First parameter is the key for the
-  // default, e.g. LLAMA_MAX_PLACED_RESERVATIONS_KEY, and the second parameter is the
-  // pool name.
-  final static String LLAMA_PER_POOL_CONFIG_KEY_FORMAT = "%s.%s";
-
-  // Watches for changes to the fair scheduler allocation file.
-  @VisibleForTesting
-  final AllocationFileLoaderService allocLoader_;
-
-  // Provides access to the fair scheduler allocation file. An AtomicReference because it
-  // is reset when the allocation configuration file changes and other threads access it.
-  private final AtomicReference<AllocationConfiguration> allocationConf_;
-
-  // Watches the Llama configuration file for changes.
-  @VisibleForTesting
-  final FileWatchService llamaConfWatcher_;
-
-  // Used by this class to access the configs provided by the Llama configuration.
-  // This is replaced when the Llama configuration file changes.
-  private volatile Configuration llamaConf_;
-
-  // URL of the Llama configuration file.
-  private final URL llamaConfUrl_;
-
-  /**
-   * Updates the Llama configuration when the file changes. The file is llamaConfUrl_
-   * and it will exist when this is created (or RequestPoolService will not start). If
-   * the file is later removed, warnings will be written to the log but the previous
-   * configuration will still be accessible.
-   */
-  private final class LlamaConfWatcher implements FileChangeListener {
-    public void onFileChange() {
-      // If llamaConfUrl_ is null the watcher should not have been created.
-      Preconditions.checkNotNull(llamaConfUrl_);
-      LOG.info("Loading Llama configuration: " + llamaConfUrl_.getFile());
-      Configuration conf = new Configuration();
-      conf.addResource(llamaConfUrl_);
-      llamaConf_ = conf;
-    }
-  }
-
-  /**
-   * Creates a RequestPoolService instance with a configuration containing the specified
-   * fair-scheduler.xml and llama-site.xml.
-   *
-   * @param fsAllocationPath path to the fair scheduler allocation file.
-   * @param llamaSitePath path to the Llama configuration file.
-   */
-  public RequestPoolService(final String fsAllocationPath, final String llamaSitePath) {
-    Preconditions.checkNotNull(fsAllocationPath);
-    running_ = new AtomicBoolean(false);
-    allocationConf_ = new AtomicReference<AllocationConfiguration>();
-    URL fsAllocationURL = getURL(fsAllocationPath);
-    if (fsAllocationURL == null) {
-      throw new IllegalArgumentException(
-          "Unable to find allocation configuration file: " + fsAllocationPath);
-    }
-    Configuration allocConf = new Configuration(false);
-    allocConf.set(FairSchedulerConfiguration.ALLOCATION_FILE, fsAllocationURL.getPath());
-    allocLoader_ = new AllocationFileLoaderService();
-    allocLoader_.init(allocConf);
-
-    if (!Strings.isNullOrEmpty(llamaSitePath)) {
-      llamaConfUrl_ = getURL(llamaSitePath);
-      if (llamaConfUrl_ == null) {
-        throw new IllegalArgumentException(
-            "Unable to find Llama configuration file: " + llamaSitePath);
-      }
-      llamaConf_ = new Configuration(false);
-      llamaConf_.addResource(llamaConfUrl_);
-      llamaConfWatcher_ = new FileWatchService(new File(llamaConfUrl_.getPath()),
-          new LlamaConfWatcher());
-    } else {
-      llamaConfWatcher_ = null;
-      llamaConfUrl_ = null;
-    }
-  }
-
-  /**
-   * Returns a {@link URL} for the file if it exists, null otherwise.
-   */
-  @VisibleForTesting
-  static URL getURL(String path) {
-    Preconditions.checkNotNull(path);
-    File file = new File(path);
-    file = file.getAbsoluteFile();
-    if (!file.exists()) {
-      LOG.error("Unable to find specified file: " + path);
-      return null;
-    }
-    try {
-      return file.toURI().toURL();
-    } catch (MalformedURLException ex) {
-      LOG.error("Unable to construct URL for file: " + path, ex);
-      return null;
-    }
-  }
-
-  /**
-   * Starts the RequestPoolService instance. It does the initial loading of the
-   * configuration and starts the automatic reloading.
-   */
-  public void start() {
-    Preconditions.checkState(!running_.get());
-    allocLoader_.setReloadListener(new AllocationFileLoaderService.Listener() {
-      @Override
-      public void onReload(AllocationConfiguration info) {
-        allocationConf_.set(info);
-      }
-    });
-    allocLoader_.start();
-    try {
-      allocLoader_.reloadAllocations();
-    } catch (Exception ex) {
-      try {
-        stopInternal();
-      } catch (Exception stopEx) {
-        LOG.error("Unable to stop AllocationFileLoaderService after failed start.",
-            stopEx);
-      }
-      throw new RuntimeException(ex);
-    }
-    if (llamaConfWatcher_ != null) llamaConfWatcher_.start();
-    running_.set(true);
-  }
-
-  /**
-   * Stops the RequestPoolService instance. Only used by tests.
-   */
-  public void stop() {
-    Preconditions.checkState(running_.get());
-    stopInternal();
-  }
-
-  /**
-   * Stops the RequestPoolService instance without checking the running state. Only
-   * called by stop() (which is only used in tests) or by start() if a failure occurs.
-   * Should not be called more than once.
-   */
-  private void stopInternal() {
-    running_.set(false);
-    if (llamaConfWatcher_ != null) llamaConfWatcher_.stop();
-    allocLoader_.stop();
-  }
-
-  /**
-   * Resolves a user and pool to the pool specified by the allocation placement policy
-   * and checks if the user is authorized to submit requests.
-   *
-   * @param thriftResolvePoolParams Serialized {@link TResolveRequestPoolParams}
-   * @return serialized {@link TResolveRequestPoolResult}
-   */
-  public byte[] resolveRequestPool(byte[] thriftResolvePoolParams)
-      throws ImpalaException {
-    TResolveRequestPoolParams resolvePoolParams = new TResolveRequestPoolParams();
-    JniUtil.deserializeThrift(protocolFactory_, resolvePoolParams,
-        thriftResolvePoolParams);
-    TResolveRequestPoolResult result = resolveRequestPool(resolvePoolParams);
-    LOG.info("resolveRequestPool(pool={}, user={}): resolved_pool={}, has_access={}",
-        new Object[] { resolvePoolParams.getRequested_pool(), resolvePoolParams.getUser(),
-                       result.resolved_pool, result.has_access });
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  TResolveRequestPoolResult resolveRequestPool(
-      TResolveRequestPoolParams resolvePoolParams) throws InternalException {
-    String requestedPool = resolvePoolParams.getRequested_pool();
-    String user = resolvePoolParams.getUser();
-    TResolveRequestPoolResult result = new TResolveRequestPoolResult();
-    String errorMessage = null;
-    String pool = null;
-    try {
-      pool = assignToPool(requestedPool, user);
-    } catch (IOException ex) {
-      errorMessage = ex.getMessage();
-      if (errorMessage.startsWith("No groups found for user")) {
-        // The error thrown when using the 'primaryGroup' or 'secondaryGroup' rules
-        // for a user that does not exist is not helpful.
-        errorMessage = String.format(
-            "Failed to resolve user '%s' to a pool while evaluating the " +
-            "'primaryGroup' or 'secondaryGroup' queue placement rules because no " +
-            "groups were found for the user. This is likely because the user does not " +
-            "exist on the local operating system.", resolvePoolParams.getUser());
-      }
-      LOG.warn(String.format("Error assigning to pool. requested='%s', user='%s', msg=%s",
-          requestedPool, user, errorMessage), ex);
-    }
-    if (pool == null) {
-      if (errorMessage == null) {
-        // This occurs when assignToPool returns null (not an error), i.e. if the pool
-        // cannot be resolved according to the policy.
-        result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
-      } else {
-        // If Yarn throws an exception, return an error status.
-        result.setStatus(
-            new TStatus(TErrorCode.INTERNAL_ERROR, Lists.newArrayList(errorMessage)));
-      }
-    } else {
-      result.setResolved_pool(pool);
-      result.setHas_access(hasAccess(pool, user));
-      result.setStatus(new TStatus(TErrorCode.OK, Lists.<String>newArrayList()));
-    }
-    return result;
-  }
-
-  /**
-   * Gets the pool configuration values for the specified pool.
-   *
-   * @param thriftPoolConfigParams Serialized {@link TPoolConfigParams}
-   * @return serialized {@link TPoolConfig}
-   */
-  public byte[] getPoolConfig(byte[] thriftPoolConfigParams) throws ImpalaException {
-    Preconditions.checkState(running_.get());
-    TPoolConfigParams poolConfigParams = new TPoolConfigParams();
-    JniUtil.deserializeThrift(protocolFactory_, poolConfigParams,
-        thriftPoolConfigParams);
-    TPoolConfig result = getPoolConfig(poolConfigParams.getPool());
-    try {
-      return new TSerializer(protocolFactory_).serialize(result);
-    } catch (TException e) {
-      throw new InternalException(e.getMessage());
-    }
-  }
-
-  @VisibleForTesting
-  TPoolConfig getPoolConfig(String pool) {
-    TPoolConfig result = new TPoolConfig();
-    long maxMemoryMb = allocationConf_.get().getMaxResources(pool).getMemory();
-    result.setMax_mem_resources(
-        maxMemoryMb == Integer.MAX_VALUE ? -1 : (long) maxMemoryMb * ByteUnits.MEGABYTE);
-    if (llamaConf_ == null) {
-      result.setMax_requests(LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT);
-      result.setMax_queued(LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT);
-      result.setDefault_query_options("");
-    } else {
-      // Capture the current llamaConf_ in case it changes while we're using it.
-      Configuration currentLlamaConf = llamaConf_;
-      result.setMax_requests(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          LLAMA_MAX_PLACED_RESERVATIONS_KEY,
-          LLAMA_MAX_PLACED_RESERVATIONS_DEFAULT));
-      result.setMax_queued(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          LLAMA_MAX_QUEUED_RESERVATIONS_KEY,
-          LLAMA_MAX_QUEUED_RESERVATIONS_DEFAULT));
-
-      // Only return positive values. Admission control has a default from gflags.
-      int queueTimeoutMs = getLlamaPoolConfigValue(currentLlamaConf, pool,
-          QUEUE_TIMEOUT_KEY, -1);
-      if (queueTimeoutMs > 0) result.setQueue_timeout_ms(queueTimeoutMs);
-      result.setDefault_query_options(getLlamaPoolConfigValue(currentLlamaConf, pool,
-          QUERY_OPTIONS_KEY, ""));
-    }
-    LOG.info("getPoolConfig(pool={}): max_mem_resources={}, max_requests={}, " +
-        "max_queued={},  queue_timeout_ms={}, default_query_options={}",
-        new Object[] { pool, result.max_mem_resources, result.max_requests,
-            result.max_queued, result.queue_timeout_ms, result.default_query_options });
-    return result;
-  }
-
-  /**
-   * Looks up the per-pool integer config from the Llama Configuration. First checks for
-   * a per-pool value, then a default set in the config, and lastly falls back to the
-   * specified 'defaultValue'.
-   *
-   * @param conf The Configuration to use, provided so the caller can ensure the same
-   *        Configuration is used to look up multiple properties.
-   */
-  private int getLlamaPoolConfigValue(Configuration conf, String pool, String key,
-      int defaultValue) {
-    return conf.getInt(String.format(LLAMA_PER_POOL_CONFIG_KEY_FORMAT, key, pool),
-        conf.getInt(key, defaultValue));
-  }
-
-  /**
-   * Looks up the per-pool String config from the llama Configuration. See above.
-   */
-  private String getLlamaPoolConfigValue(Configuration conf, String pool, String key,
-      String defaultValue) {
-    return conf.get(String.format(LLAMA_PER_POOL_CONFIG_KEY_FORMAT, key, pool),
-        conf.get(key, defaultValue));
-  }
-
-  /**
-   * Resolves the actual pool to use via the allocation placement policy. The policy may
-   * change the requested pool.
-   *
-   * @param requestedPool The requested pool. May not be null, an empty string indicates
-   * the policy should return the default pool for this user.
-   * @param user The user, must not be null or empty.
-   * @return the actual pool to use, null if a pool could not be resolved.
-   */
-  @VisibleForTesting
-  String assignToPool(String requestedPool, String user)
-      throws InternalException, IOException {
-    Preconditions.checkState(running_.get());
-    Preconditions.checkNotNull(requestedPool);
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(user));
-    // Convert the user name to a short name (e.g. 'user1@domain' to 'user1') because
-    // assignAppToQueue() will check group membership which should always be done on
-    // the short name of the principal.
-    String shortName = new User(user).getShortName();
-    return allocationConf_.get().getPlacementPolicy().assignAppToQueue(
-        requestedPool.isEmpty() ? YarnConfiguration.DEFAULT_QUEUE_NAME : requestedPool,
-        shortName);
-  }
-
-  /**
-   * Indicates if a user has access to the pool.
-   *
-   * @param pool the pool to check if the user has access to. NOTE: it should always be
-   * called with a pool returned by the {@link #assignToPool(String, String)} method.
-   * @param user the user to check if it has access to the pool.
-   * @return True if the user has access to the pool.
-   */
-  @VisibleForTesting
-  boolean hasAccess(String pool, String user) throws InternalException {
-    Preconditions.checkState(running_.get());
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(pool));
-    Preconditions.checkArgument(!Strings.isNullOrEmpty(user));
-    // Convert the user name to a short name (e.g. 'user1@domain' to 'user1') because
-    // the UserGroupInformation will check group membership which should always be done
-    // on the short name of the principal.
-    String shortName;
-    User requestingUser = new User(user);
-    shortName = requestingUser.getShortName();
-    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(shortName);
-    return allocationConf_.get().hasAccess(pool, QueueACL.SUBMIT_APPLICATIONS, ugi);
-  }
-}
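
A hedged sketch of the lookup order getLlamaPoolConfigValue() implements (the pool-specific key "<key>.<pool>" first, then the bare key, then the caller's default), written against a plain Map rather than the Hadoop Configuration API so it stays self-contained; the example pool names are made up.

  import java.util.Map;

  final class PoolConfigLookupSketch {
    static int lookup(Map<String, String> conf, String pool, String key, int defaultValue) {
      String v = conf.get(key + "." + pool);  // per-pool override wins
      if (v == null) v = conf.get(key);       // then the cluster-wide default key
      return v == null ? defaultValue : Integer.parseInt(v);
    }

    public static void main(String[] args) {
      Map<String, String> conf = Map.of(
          "llama.am.throttling.maximum.placed.reservations.root.dev", "50");
      // Pool 'root.dev' resolves to 50; other pools fall back to the supplied default (-1).
      System.out.println(lookup(conf, "root.dev",
          "llama.am.throttling.maximum.placed.reservations", -1));  // 50
      System.out.println(lookup(conf, "root.prod",
          "llama.am.throttling.maximum.placed.reservations", -1));  // -1
    }
  }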

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java b/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
deleted file mode 100644
index 4f39b3c..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/SentryPolicyService.java
+++ /dev/null
@@ -1,466 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-
-import org.apache.sentry.SentryUserException;
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
-import org.apache.sentry.provider.db.SentryAlreadyExistsException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryGrantOption;
-import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-import org.apache.sentry.service.thrift.SentryServiceClientFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.cloudera.impala.analysis.PrivilegeSpec;
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.InternalException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.cloudera.impala.thrift.TPrivilegeLevel;
-import com.cloudera.impala.thrift.TPrivilegeScope;
-import com.google.common.base.Joiner;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-
-/**
- *  Wrapper around the SentryService APIs that are used by Impala and Impala tests.
- */
-public class SentryPolicyService {
-  private final static Logger LOG = LoggerFactory.getLogger(SentryPolicyService.class);
-  private final String ACCESS_DENIED_ERROR_MSG =
-      "User '%s' does not have privileges to execute: %s";
-  private final SentryConfig config_;
-
-  /**
-   * Wrapper around a SentryPolicyServiceClient.
-   * TODO: When SENTRY-296 is resolved we can more easily cache connections instead of
-   * opening a new connection for each request.
-   */
-  class SentryServiceClient {
-    private final SentryPolicyServiceClient client_;
-
-    /**
-     * Creates and opens a new Sentry Service thrift client.
-     */
-    public SentryServiceClient() throws InternalException {
-      client_ = createClient();
-    }
-
-    /**
-     * Get the underlying SentryPolicyServiceClient.
-     */
-    public SentryPolicyServiceClient get() {
-      return client_;
-    }
-
-    /**
-     * Returns this client back to the connection pool. Can be called multiple times.
-     */
-    public void close() {
-      client_.close();
-    }
-
-    /**
-     * Creates a new client to the SentryService.
-     */
-    private SentryPolicyServiceClient createClient() throws InternalException {
-      SentryPolicyServiceClient client;
-      try {
-        client = SentryServiceClientFactory.create(config_.getConfig());
-      } catch (Exception e) {
-        throw new InternalException("Error creating Sentry Service client: ", e);
-      }
-      return client;
-    }
-  }
-
-  public SentryPolicyService(SentryConfig config) {
-    config_ = config;
-  }
-
-  /**
-   * Drops a role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to drop.
-   * @param ifExists - If true, no error is thrown if the role does not exist.
-   * @throws ImpalaException - On any error dropping the role.
-   */
-  public void dropRole(User requestingUser, String roleName, boolean ifExists)
-      throws ImpalaException {
-    LOG.trace(String.format("Dropping role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      if (ifExists) {
-        client.get().dropRoleIfExists(requestingUser.getShortName(), roleName);
-      } else {
-        client.get().dropRole(requestingUser.getShortName(), roleName);
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "DROP_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error dropping role: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Creates a new role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to create.
-   * @param ifNotExists - If true, no error is thrown if the role already exists.
-   * @throws ImpalaException - On any error creating the role.
-   */
-  public void createRole(User requestingUser, String roleName, boolean ifNotExists)
-      throws ImpalaException {
-    LOG.trace(String.format("Creating role: %s on behalf of: %s", roleName,
-        requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().createRole(requestingUser.getShortName(), roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "CREATE_ROLE"));
-    } catch (SentryAlreadyExistsException e) {
-      if (ifNotExists) return;
-      throw new InternalException("Error creating role: ", e);
-    } catch (SentryUserException e) {
-      throw new InternalException("Error creating role: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Grants a role to a group.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to grant to a group. Role must already exist.
-   * @param groupName - The group to grant the role to.
-   * @throws ImpalaException - On any error.
-   */
-  public void grantRoleToGroup(User requestingUser, String roleName, String groupName)
-      throws ImpalaException {
-    LOG.trace(String.format("Granting role '%s' to group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().grantRoleToGroup(requestingUser.getShortName(), groupName, roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "GRANT_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'grantRoleToGroup' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Removes a role from a group.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role name to remove.
-   * @param groupName - The group to remove the role from.
-   * @throws InternalException - On any error.
-   */
-  public void revokeRoleFromGroup(User requestingUser, String roleName, String groupName)
-      throws ImpalaException {
-    LOG.trace(String.format("Revoking role '%s' from group '%s' on behalf of: %s",
-        roleName, groupName, requestingUser.getName()));
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      client.get().revokeRoleFromGroup(requestingUser.getShortName(),
-          groupName, roleName);
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "REVOKE_ROLE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'revokeRoleFromGroup' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Grants a privilege to an existing role.
-   */
-  public void grantRolePrivilege(User requestingUser, String roleName,
-      TPrivilege privilege) throws ImpalaException {
-    grantRolePrivileges(requestingUser, roleName, Lists.newArrayList(privilege));
-  }
-
-  /**
-   * Grants privileges to an existing role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to grant privileges to (case insensitive).
-   * @param privileges - The privileges to grant.
-   * @throws ImpalaException - On any error
-   */
-  public void grantRolePrivileges(User requestingUser, String roleName,
-      List<TPrivilege> privileges) throws ImpalaException {
-    Preconditions.checkState(!privileges.isEmpty());
-    TPrivilege privilege = privileges.get(0);
-    TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Granting role '%s' '%s' privilege on '%s' on behalf of: %s",
-        roleName, privilege.getPrivilege_level().toString(), scope.toString(),
-        requestingUser.getName()));
-    // Verify that all privileges have the same scope.
-    for (int i = 1; i < privileges.size(); ++i) {
-      Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
-          "privileges must have the same scope.");
-    }
-    Preconditions.checkState(scope == TPrivilegeScope.COLUMN || privileges.size() == 1,
-        "Cannot grant multiple " + scope + " privileges with a singe RPC to the " +
-        "Sentry Service.");
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      switch (scope) {
-        case SERVER:
-          client.get().grantServerPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case DATABASE:
-          client.get().grantDatabasePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case TABLE:
-          client.get().grantTablePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), privilege.getPrivilege_level().toString(),
-              privilege.isHas_grant_opt());
-          break;
-        case COLUMN:
-          client.get().grantColumnsPrivileges(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), getColumnNames(privileges),
-              privilege.getPrivilege_level().toString(), privilege.isHas_grant_opt());
-          break;
-        case URI:
-          client.get().grantURIPrivilege(requestingUser.getShortName(),
-              roleName, privilege.getServer_name(), privilege.getUri(),
-              privilege.isHas_grant_opt());
-          break;
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "GRANT_PRIVILEGE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'grantPrivilege*' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Revokes a privilege from an existing role.
-   */
-  public void revokeRolePrivilege(User requestingUser, String roleName,
-      TPrivilege privilege) throws ImpalaException {
-    revokeRolePrivileges(requestingUser, roleName, Lists.newArrayList(privilege));
-  }
-
-  /**
-   * Revokes privileges from an existing role.
-   *
-   * @param requestingUser - The requesting user.
-   * @param roleName - The role to revoke privileges from (case insensitive).
-   * @param privileges - The privileges to revoke.
-   * @throws ImpalaException - On any error
-   */
-  public void revokeRolePrivileges(User requestingUser, String roleName,
-      List<TPrivilege> privileges) throws ImpalaException {
-    Preconditions.checkState(!privileges.isEmpty());
-    TPrivilege privilege = privileges.get(0);
-    TPrivilegeScope scope = privilege.getScope();
-    LOG.trace(String.format("Revoking from role '%s' '%s' privilege on '%s' on " +
-        "behalf of: %s", roleName, privilege.getPrivilege_level().toString(),
-        scope.toString(), requestingUser.getName()));
-    // Verify that all privileges have the same scope.
-    for (int i = 1; i < privileges.size(); ++i) {
-      Preconditions.checkState(privileges.get(i).getScope() == scope, "All the " +
-          "privileges must have the same scope.");
-    }
-    Preconditions.checkState(scope == TPrivilegeScope.COLUMN || privileges.size() == 1,
-        "Cannot revoke multiple " + scope + " privileges with a singe RPC to the " +
-        "Sentry Service.");
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      switch (scope) {
-        case SERVER:
-          client.get().revokeServerPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getPrivilege_level().toString());
-          break;
-        case DATABASE:
-          client.get().revokeDatabasePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getPrivilege_level().toString(), null);
-          break;
-        case TABLE:
-          client.get().revokeTablePrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), privilege.getPrivilege_level().toString(),
-              null);
-          break;
-        case COLUMN:
-          client.get().revokeColumnsPrivilege(requestingUser.getShortName(), roleName,
-              privilege.getServer_name(), privilege.getDb_name(),
-              privilege.getTable_name(), getColumnNames(privileges),
-              privilege.getPrivilege_level().toString(), null);
-          break;
-        case URI:
-          client.get().revokeURIPrivilege(requestingUser.getShortName(),
-              roleName, privilege.getServer_name(), privilege.getUri(),
-              null);
-          break;
-      }
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "REVOKE_PRIVILEGE"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'revokePrivilege*' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Returns the column names referenced in a list of column-level privileges.
-   * Verifies that all column-level privileges refer to the same table.
-   */
-  private List<String> getColumnNames(List<TPrivilege> privileges) {
-    List<String> columnNames = Lists.newArrayList();
-    String tablePath = PrivilegeSpec.getTablePath(privileges.get(0));
-    columnNames.add(privileges.get(0).getColumn_name());
-    // Collect all column names and verify that they belong to the same table.
-    for (int i = 1; i < privileges.size(); ++i) {
-      TPrivilege privilege = privileges.get(i);
-      Preconditions.checkState(tablePath.equals(PrivilegeSpec.getTablePath(privilege))
-          && privilege.getScope() == TPrivilegeScope.COLUMN);
-      columnNames.add(privileges.get(i).getColumn_name());
-    }
-    return columnNames;
-  }
-
-  /**
-   * Lists all roles granted to all groups a user belongs to.
-   */
-  public List<TSentryRole> listUserRoles(User requestingUser)
-      throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listUserRoles(
-          requestingUser.getShortName()));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_USER_ROLES"));
-    } catch (SentryUserException e) {
-      throw new InternalException(
-          "Error making 'listUserRoles' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Lists all roles.
-   */
-  public List<TSentryRole> listAllRoles(User requestingUser) throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listRoles(requestingUser.getShortName()));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_ROLES"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error making 'listRoles' RPC to Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Lists all privileges granted to a role.
-   */
-  public List<TSentryPrivilege> listRolePrivileges(User requestingUser, String roleName)
-      throws ImpalaException {
-    SentryServiceClient client = new SentryServiceClient();
-    try {
-      return Lists.newArrayList(client.get().listAllPrivilegesByRoleName(
-          requestingUser.getShortName(), roleName));
-    } catch (SentryAccessDeniedException e) {
-      throw new AuthorizationException(String.format(ACCESS_DENIED_ERROR_MSG,
-          requestingUser.getName(), "LIST_ROLE_PRIVILEGES"));
-    } catch (SentryUserException e) {
-      throw new InternalException("Error making 'listAllPrivilegesByRoleName' RPC to " +
-          "Sentry Service: ", e);
-    } finally {
-      client.close();
-    }
-  }
-
-  /**
-   * Utility function that converts a TSentryPrivilege to an Impala TPrivilege object.
-   */
-  public static TPrivilege sentryPrivilegeToTPrivilege(TSentryPrivilege sentryPriv) {
-    TPrivilege privilege = new TPrivilege();
-    privilege.setServer_name(sentryPriv.getServerName());
-    if (sentryPriv.isSetDbName()) privilege.setDb_name(sentryPriv.getDbName());
-    if (sentryPriv.isSetTableName()) privilege.setTable_name(sentryPriv.getTableName());
-    if (sentryPriv.isSetColumnName()) {
-      privilege.setColumn_name(sentryPriv.getColumnName());
-    }
-    if (sentryPriv.isSetURI()) privilege.setUri(sentryPriv.getURI());
-    privilege.setScope(Enum.valueOf(TPrivilegeScope.class,
-        sentryPriv.getPrivilegeScope().toUpperCase()));
-    if (sentryPriv.getAction().equals("*")) {
-      privilege.setPrivilege_level(TPrivilegeLevel.ALL);
-    } else {
-      privilege.setPrivilege_level(Enum.valueOf(TPrivilegeLevel.class,
-          sentryPriv.getAction().toUpperCase()));
-    }
-    privilege.setPrivilege_name(RolePrivilege.buildRolePrivilegeName(privilege));
-    privilege.setCreate_time_ms(sentryPriv.getCreateTime());
-    if (sentryPriv.isSetGrantOption() &&
-        sentryPriv.getGrantOption() == TSentryGrantOption.TRUE) {
-      privilege.setHas_grant_opt(true);
-    } else {
-      privilege.setHas_grant_opt(false);
-    }
-    return privilege;
-  }
-}
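
For context, this is roughly how the catalog layer drives the service above for a
statement like GRANT SELECT ON TABLE functional.alltypes TO ROLE analyst. A minimal
sketch only: sentryConfig, requestingUser, the role/table names and the
TPrivilegeLevel.SELECT value are illustrative assumptions, not taken from the diff.

// Hypothetical grant of SELECT on functional.alltypes to role "analyst".
SentryPolicyService service = new SentryPolicyService(sentryConfig);
TPrivilege priv = new TPrivilege();
priv.setScope(TPrivilegeScope.TABLE);
priv.setServer_name("server1");
priv.setDb_name("functional");
priv.setTable_name("alltypes");
priv.setPrivilege_level(TPrivilegeLevel.SELECT);  // assumed enum value
priv.setHas_grant_opt(false);
// grantRolePrivileges() dispatches on the scope (TABLE here) and issues a single
// grantTablePrivilege RPC to the Sentry Service on behalf of requestingUser.
service.grantRolePrivileges(requestingUser, "analyst", Lists.newArrayList(priv));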

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java b/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
deleted file mode 100644
index 76e4931..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/SentryProxy.java
+++ /dev/null
@@ -1,348 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.log4j.Logger;
-import org.apache.sentry.provider.db.service.thrift.TSentryGroup;
-import org.apache.sentry.provider.db.service.thrift.TSentryPrivilege;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-
-import com.cloudera.impala.authorization.SentryConfig;
-import com.cloudera.impala.authorization.User;
-import com.cloudera.impala.catalog.AuthorizationException;
-import com.cloudera.impala.catalog.CatalogException;
-import com.cloudera.impala.catalog.CatalogServiceCatalog;
-import com.cloudera.impala.catalog.Role;
-import com.cloudera.impala.catalog.RolePrivilege;
-import com.cloudera.impala.common.ImpalaException;
-import com.cloudera.impala.common.ImpalaRuntimeException;
-import com.cloudera.impala.thrift.TPrivilege;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
-/**
- * Thread-safe class that acts as a link between the Sentry Service and the Catalog
- * to ensure both places are updated consistently. More specifically, this class
- * synchronizes updates to the Sentry Service and the Impala catalog to ensure
- * they are applied atomically (in Impala's view) and only if reading/writing the
- * policy via the Sentry Service succeeds. Note that there may be external updates
- * to the Sentry Service that cannot be protected against.
- * It also periodically refreshes the authorization policy metadata and updates the
- * catalog with any changes. Because any catalog updates need to be synchronized with
- * updates from GRANT/REVOKE statements, it makes sense for this class to
- * synchronize all modifications.
- */
-public class SentryProxy {
-  private static final Logger LOG = Logger.getLogger(SentryProxy.class);
-
-  // Used to periodically poll the Sentry Service and update the catalog with any
-  // changes.
-  private final ScheduledExecutorService policyReader_ =
-      Executors.newScheduledThreadPool(1);
-
-  // The Catalog this SentryProxy is associated with.
-  private final CatalogServiceCatalog catalog_;
-
-  // The interface to access the Sentry Policy Service to read policy metadata.
-  private final SentryPolicyService sentryPolicyService_;
-
-  // This is the user that the Catalog Service is running as. For kerberized clusters,
-  // this is set to the Kerberos principal of the Catalog Service. This user should
-  // always be a Sentry Service admin, i.e. have full rights to read/update the
-  // Sentry Service.
-  private final User processUser_;
-
-  public SentryProxy(SentryConfig sentryConfig, CatalogServiceCatalog catalog,
-      String kerberosPrincipal) {
-    Preconditions.checkNotNull(catalog);
-    Preconditions.checkNotNull(sentryConfig);
-    catalog_ = catalog;
-    if (Strings.isNullOrEmpty(kerberosPrincipal)) {
-      processUser_ = new User(System.getProperty("user.name"));
-    } else {
-      processUser_ = new User(kerberosPrincipal);
-    }
-    sentryPolicyService_ = new SentryPolicyService(sentryConfig);
-    // The Sentry Service is enabled, so start polling it for policy changes.
-    // TODO: Make the polling interval configurable.
-    policyReader_.scheduleAtFixedRate(new PolicyReader(), 0, 60,
-        TimeUnit.SECONDS);
-  }
-
-  /**
-   * Refreshes the authorization policy metadata by querying the Sentry Policy Service.
-   * There is currently no way to get a snapshot of the policy from the Sentry Service,
-   * so it is possible that Impala will end up in a state that is not consistent with a
-   * state the Sentry Service has ever been in. For example, consider the case where a
-   * refresh is running and all privileges for Role A have been processed. Before moving
-   * to Role B, the user revokes a privilege from Role A and grants it to Role B.
-   * Impala will temporarily (until the next refresh) think the privilege is granted to
-   * Role A AND to Role B.
-   * TODO: Think more about consistency as well as how to recover from errors that leave
-   * the policy in a potentially inconsistent state (an RPC fails part-way through a
-   * refresh). We should also consider applying this entire update to the catalog
-   * atomically.
-   */
-  private class PolicyReader implements Runnable {
-    public void run() {
-      synchronized (SentryProxy.this) {
-        // Assume all roles should be removed. Then query the Policy Service and remove
-        // roles from this set that actually exist.
-        Set<String> rolesToRemove = catalog_.getAuthPolicy().getAllRoleNames();
-        try {
-          // Read the full policy, adding new/modified roles to "updatedRoles".
-          for (TSentryRole sentryRole:
-              sentryPolicyService_.listAllRoles(processUser_)) {
-            // This role exists and should not be removed, delete it from the
-            // rolesToRemove set.
-            rolesToRemove.remove(sentryRole.getRoleName().toLowerCase());
-
-            Set<String> grantGroups = Sets.newHashSet();
-            for (TSentryGroup group: sentryRole.getGroups()) {
-              grantGroups.add(group.getGroupName());
-            }
-            Role existingRole =
-                catalog_.getAuthPolicy().getRole(sentryRole.getRoleName());
-            Role role;
-            // These roles are the same, use the current role.
-            if (existingRole != null &&
-                existingRole.getGrantGroups().equals(grantGroups)) {
-              role = existingRole;
-            } else {
-              role = catalog_.addRole(sentryRole.getRoleName(), grantGroups);
-            }
-
-            // Assume all privileges should be removed. Privileges that still exist are
-            // deleted from this set and we are left with the set of privileges that need
-            // to be removed.
-            Set<String> privilegesToRemove = role.getPrivilegeNames();
-
-            // Check all the privileges that are part of this role.
-            for (TSentryPrivilege sentryPriv:
-                sentryPolicyService_.listRolePrivileges(processUser_, role.getName())) {
-              TPrivilege thriftPriv =
-                  SentryPolicyService.sentryPrivilegeToTPrivilege(sentryPriv);
-              thriftPriv.setRole_id(role.getId());
-              privilegesToRemove.remove(thriftPriv.getPrivilege_name().toLowerCase());
-
-              RolePrivilege existingPriv =
-                  role.getPrivilege(thriftPriv.getPrivilege_name());
-              // We already know about this privilege (privileges cannot be modified).
-              if (existingPriv != null &&
-                  existingPriv.getCreateTimeMs() == sentryPriv.getCreateTime()) {
-                continue;
-              }
-              catalog_.addRolePrivilege(role.getName(), thriftPriv);
-            }
-
-            // Remove the privileges that no longer exist.
-            for (String privilegeName: privilegesToRemove) {
-              TPrivilege privilege = new TPrivilege();
-              privilege.setPrivilege_name(privilegeName);
-              catalog_.removeRolePrivilege(role.getName(), privilege);
-            }
-          }
-        } catch (Exception e) {
-          LOG.error("Error refreshing Sentry policy: ", e);
-          return;
-        }
-
-        // Remove the roles that no longer exist, incrementing the catalog version
-        // to indicate a change.
-        for (String roleName: rolesToRemove) {
-          catalog_.removeRole(roleName);
-        }
-      }
-    }
-  }
-
-  /**
-   * Checks whether this user is an admin on the Sentry Service. Throws an
-   * AuthorizationException if the user does not have admin privileges or if there are
-   * any issues communicating with the Sentry Service.
-   * @param requestingUser - The requesting user.
-   */
-  public void checkUserSentryAdmin(User requestingUser)
-      throws AuthorizationException {
-    // Check if the user has access by issuing a read-only RPC.
-    // TODO: This is not an elegant way to verify whether the user has privileges to
-    // access Sentry. This should be modified in the future when Sentry has
-    // a more robust mechanism to perform these checks.
-    try {
-      sentryPolicyService_.listAllRoles(requestingUser);
-    } catch (ImpalaException e) {
-      throw new AuthorizationException(String.format("User '%s' does not have " +
-          "privileges to access the requested policy metadata or Sentry Service is " +
-          "unavailable.", requestingUser.getName()));
-    }
-  }
-
-  /**
-   * Creates a new role using the Sentry Service and updates the Impala catalog.
-   * If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the new Role.
-   * Throws exception if there was any error updating the Sentry Service or
-   * if a role with the same name already exists in the catalog. This includes
-   * the case where a role was added externally (e.g. via Hive). If the role was added
-   * externally, Impala will load it during the next refresh of the policy.
-   * TODO: Consider adding the role to the policy if we find it was created
-   * externally.
-   */
-  public synchronized Role createRole(User user, String roleName)
-      throws ImpalaException {
-    Role role = null;
-    if (catalog_.getAuthPolicy().getRole(roleName) != null) {
-      throw new CatalogException("Role already exists: " + roleName);
-    }
-    sentryPolicyService_.createRole(user, roleName, false);
-    // Initially the role has no grant groups (empty set).
-    role = catalog_.addRole(roleName, Sets.<String>newHashSet());
-    return role;
-  }
-
-  /**
-   * Drops the given role using the Sentry Service and updates the Impala catalog.
-   * If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the removed Role or null if the role did not exist in the
-   * Catalog.
-   * Throws exception if there was any error updating the Sentry Service.
-   */
-  public synchronized Role dropRole(User user, String roleName) throws ImpalaException {
-    sentryPolicyService_.dropRole(user, roleName, false);
-    return catalog_.removeRole(roleName);
-  }
-
-  /**
-   * Adds a grant group to the role using the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the updated Role.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized Role grantRoleGroup(User user, String roleName, String groupName)
-      throws ImpalaException {
-    sentryPolicyService_.grantRoleToGroup(user, roleName, groupName);
-    return catalog_.addRoleGrantGroup(roleName, groupName);
-  }
-
-  /**
-   * Removes the role grant group using the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not
-   * be modified. Returns the updated Role.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized Role revokeRoleGroup(User user, String roleName, String groupName)
-      throws ImpalaException {
-    sentryPolicyService_.revokeRoleFromGroup(user, roleName, groupName);
-    return catalog_.removeRoleGrantGroup(roleName, groupName);
-  }
-
-  /**
-   * Grants privileges to a role in the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails, the Impala catalog will not
-   * be modified. Returns the granted privileges.
-   * Throws exception if there was any error updating the Sentry Service or if the Impala
-   * catalog does not contain the given role name.
-   */
-  public synchronized List<RolePrivilege> grantRolePrivileges(User user,
-      String roleName, List<TPrivilege> privileges) throws ImpalaException {
-    sentryPolicyService_.grantRolePrivileges(user, roleName, privileges);
-    // Update the catalog
-    List<RolePrivilege> rolePrivileges = Lists.newArrayList();
-    for (TPrivilege privilege: privileges) {
-      rolePrivileges.add(catalog_.addRolePrivilege(roleName, privilege));
-    }
-    return rolePrivileges;
-  }
-
-  /**
-   * Revokes privileges from a role in the Sentry Service and updates the Impala
-   * catalog. If the RPC to the Sentry Service fails the Impala catalog will not be
-   * modified. Returns the removed privileges. Throws an exception if there was any error
-   * updating the Sentry Service or if the Impala catalog does not contain the given role
-   * name.
-   */
-  public synchronized List<RolePrivilege> revokeRolePrivileges(User user,
-      String roleName, List<TPrivilege> privileges, boolean hasGrantOption)
-      throws ImpalaException {
-    List<RolePrivilege> rolePrivileges = Lists.newArrayList();
-    if (!hasGrantOption) {
-      sentryPolicyService_.revokeRolePrivileges(user, roleName, privileges);
-      // Update the catalog
-      for (TPrivilege privilege: privileges) {
-        RolePrivilege rolePriv = catalog_.removeRolePrivilege(roleName, privilege);
-        if (rolePriv == null) {
-          rolePriv = RolePrivilege.fromThrift(privilege);
-          rolePriv.setCatalogVersion(catalog_.getCatalogVersion());
-        }
-        rolePrivileges.add(rolePriv);
-      }
-    } else {
-      // If the REVOKE GRANT OPTION has been specified, the privileges should not be
-      // removed; they should just be updated to clear the GRANT OPTION flag. Sentry
-      // does not yet provide an "alter privilege" API so we need to revoke the
-      // privileges and re-grant them.
-      sentryPolicyService_.revokeRolePrivileges(user, roleName, privileges);
-      List<TPrivilege> updatedPrivileges = Lists.newArrayList();
-      for (TPrivilege privilege: privileges) {
-        RolePrivilege existingPriv = catalog_.getRolePrivilege(roleName, privilege);
-        if (existingPriv == null) {
-          RolePrivilege rolePriv = RolePrivilege.fromThrift(privilege);
-          rolePriv.setCatalogVersion(catalog_.getCatalogVersion());
-          rolePrivileges.add(rolePriv);
-          continue;
-        }
-        TPrivilege updatedPriv = existingPriv.toThrift();
-        updatedPriv.setHas_grant_opt(false);
-        updatedPrivileges.add(updatedPriv);
-      }
-      // Re-grant the updated privileges.
-      sentryPolicyService_.grantRolePrivileges(user, roleName, updatedPrivileges);
-      // Update the catalog
-      for (TPrivilege updatedPriv: updatedPrivileges) {
-        rolePrivileges.add(catalog_.addRolePrivilege(roleName, updatedPriv));
-      }
-    }
-    return rolePrivileges;
-  }
-
-  /**
-   * Performs a synchronous refresh of all authorization policy metadata and updates
-   * the Catalog with any changes. Throws an ImpalaRuntimeException if there are any
-   * errors executing the refresh job.
-   */
-  public void refresh() throws ImpalaRuntimeException {
-    try {
-      policyReader_.submit(new PolicyReader()).get();
-    } catch (Exception e) {
-      // We shouldn't get here. It means an exception leaked from the
-      // PolicyReader.
-      throw new ImpalaRuntimeException("Error refreshing authorization policy, " +
-          "current policy state may be inconsistent. Running 'invalidate metadata' " +
-          "may resolve this problem: ", e);
-    }
-  }
-}
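
The PolicyReader above reconciles the catalog against Sentry with a "mark everything
for removal, then unmark whatever still exists" pattern. A self-contained sketch of
that pattern on plain string sets (role names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ReconcileExample {
  public static void main(String[] args) {
    // Roles currently known locally (e.g. in the Impala catalog).
    Set<String> local = new HashSet<>(Arrays.asList("analyst", "etl", "stale_role"));
    // Roles reported by the authoritative source (e.g. the Sentry Service).
    Set<String> remote = new HashSet<>(Arrays.asList("analyst", "etl", "new_role"));

    // Assume every local role should be removed ...
    Set<String> toRemove = new HashSet<>(local);
    for (String name : remote) {
      // ... then keep whatever the source still reports, adding anything new
      // (corresponds to catalog_.addRole()).
      toRemove.remove(name);
      local.add(name);
    }
    // Whatever is left was dropped remotely and must be removed locally
    // (corresponds to catalog_.removeRole()).
    local.removeAll(toRemove);

    System.out.println(local);  // [analyst, etl, new_role] (order may vary)
  }
}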

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java b/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
deleted file mode 100644
index 9956c79..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/StatsHelper.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-/**
- *  This is a utility class to incrementally calculate the average, variance
- *  and standard deviation of a stream of samples. It is based on the online
- *  (Welford) algorithm described by Knuth.
- *
- *  Keep in mind that there may be edge cases where the algorithm below loses
- *  precision.
- *
- *  See below link for more detail:
- *
- *  http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Incremental_algorithm
- */
-public class StatsHelper<T extends Number> {
-
-  private long count_ = 0;
-
-  // Current mean
-  private double mean_ = 0.0d;
-
-  // Sum of the square differences from the mean
-  private double m2_ = 0.0d;
-
-  public void addSample(T val) {
-    ++count_;
-    // Welford's update: compute the delta against the old mean, update the mean,
-    // then accumulate M2 using the deltas against both the old and the new mean.
-    double delta = val.doubleValue() - mean_;
-    mean_ += delta / count_;
-    m2_ += delta * (val.doubleValue() - mean_);
-  }
-
-  public long count() { return count_; }
-
-  public double mean() {
-    return count_ > 0 ? mean_ : 0.0;
-  }
-
-  public double variance() {
-    return count_ > 1 ? m2_ / (count_ - 1) : 0.0d;
-  }
-
-  public double stddev() {
-    return Math.sqrt(variance());
-  }
-}
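
A brief usage sketch of the helper above; the sample values are illustrative:

StatsHelper<Double> stats = new StatsHelper<Double>();
for (double v : new double[] {10.0, 12.0, 14.0}) {
  stats.addSample(v);
}
// With Welford's update the running mean is 12.0, the sample variance
// m2 / (count - 1) is 4.0, and the standard deviation is 2.0.
System.out.println(stats.mean() + " " + stats.variance() + " " + stats.stddev());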

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java b/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
deleted file mode 100644
index 2e58efa..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TAccessLevelUtil.java
+++ /dev/null
@@ -1,35 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TAccessLevel;
-
-/**
- * Utility functions for working with TAccessLevel types.
- */
-public class TAccessLevelUtil {
-  public static boolean impliesWriteAccess(TAccessLevel level) {
-    return level == TAccessLevel.READ_WRITE ||
-           level == TAccessLevel.WRITE_ONLY;
-  }
-
-  public static boolean impliesReadAccess(TAccessLevel level) {
-    return level == TAccessLevel.READ_WRITE ||
-           level == TAccessLevel.READ_ONLY;
-  }
-}
\ No newline at end of file
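
A quick illustration of the helpers above against the enum values they handle:

TAccessLevelUtil.impliesWriteAccess(TAccessLevel.READ_WRITE);  // true
TAccessLevelUtil.impliesWriteAccess(TAccessLevel.READ_ONLY);   // false
TAccessLevelUtil.impliesReadAccess(TAccessLevel.WRITE_ONLY);   // false
TAccessLevelUtil.impliesReadAccess(TAccessLevel.READ_WRITE);   // true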

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java b/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
deleted file mode 100644
index 23d6eb6..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TColumnValueUtil.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.thrift.TColumnValue;
-
-/**
- * Utility functions for working with TColumnValue objects.
- */
-public class TColumnValueUtil {
-  /**
-   * Extract numeric value from TColumnValue.
-   */
-  public static double getNumericVal(TColumnValue val) {
-    if (val.isSetByte_val()) {
-      return (double) val.byte_val;
-    } else if (val.isSetShort_val()) {
-      return (double) val.short_val;
-    } else if (val.isSetInt_val()) {
-      return (double) val.int_val;
-    } else if (val.isSetLong_val()) {
-      return (double) val.long_val;
-    } else if (val.isSetDouble_val()) {
-      return (double) val.double_val;
-    } else if (val.isSetString_val()) {
-      // we always return decimals as strings, even with as_ascii=false
-      // in Expr::GetValue()
-      try {
-        return Double.valueOf(val.string_val);
-      } catch (NumberFormatException e) {
-        return 0;
-      }
-    }
-    return 0;
-  }
-}
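
A short example of the extraction above, assuming the usual Thrift-generated setters
on TColumnValue (setInt_val, setString_val):

TColumnValue intVal = new TColumnValue();
intVal.setInt_val(42);
double d1 = TColumnValueUtil.getNumericVal(intVal);    // 42.0

TColumnValue decVal = new TColumnValue();
decVal.setString_val("3.14");                          // decimals arrive as strings
double d2 = TColumnValueUtil.getNumericVal(decVal);    // 3.14

TColumnValue badVal = new TColumnValue();
badVal.setString_val("not a number");
double d3 = TColumnValueUtil.getNumericVal(badVal);    // 0.0 (parse failure)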

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/b544f019/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java b/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
deleted file mode 100644
index ae70eb4..0000000
--- a/fe/src/main/java/com/cloudera/impala/util/TResultRowBuilder.java
+++ /dev/null
@@ -1,64 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package com.cloudera.impala.util;
-
-import com.cloudera.impala.common.PrintUtils;
-import com.cloudera.impala.thrift.TColumnValue;
-import com.cloudera.impala.thrift.TResultRow;
-
-/**
- * Utility class for building TResultRows.
- */
-public class TResultRowBuilder {
-  private final TResultRow row_ = new TResultRow();
-
-  public TResultRowBuilder add(long val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setLong_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder add(double val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setDouble_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder add(String val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setString_val(val);
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder addBytes(long val) {
-    TColumnValue colVal = new TColumnValue();
-    colVal.setString_val(PrintUtils.printBytes(val));
-    row_.addToColVals(colVal);
-    return this;
-  }
-
-  public TResultRowBuilder reset() {
-    row_.clear();
-    return this;
-  }
-
-  public TResultRow get() { return row_; }
-}
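
The builder above is meant for fluent use when assembling result sets for metadata
statements; a minimal sketch with illustrative column values:

TResultRowBuilder builder = new TResultRowBuilder();
TResultRow row = builder
    .add("functional.alltypes")  // a string column, e.g. a table name
    .add(7300L)                  // a bigint column, e.g. a row count
    .addBytes(131072L)           // a size column rendered via PrintUtils.printBytes()
    .get();
// Note that reset() clears the same underlying TResultRow instance, so consume or
// copy the returned row before reusing the builder.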