Posted to commits@impala.apache.org by mi...@apache.org on 2018/07/16 15:53:56 UTC

[3/5] impala git commit: IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
deleted file mode 100644
index 9453f80..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
+++ /dev/null
@@ -1,30 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import org.apache.hadoop.fs.FileStatus;
-
-/**
- * Wrapper classes to abstract away differences between HDFS versions in
- * the MiniCluster profiles.
- */
-public class HdfsShim {
-  public static boolean isErasureCoded(FileStatus fileStatus) {
-    return fileStatus.isErasureCoded();
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
deleted file mode 100644
index 3d69545..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
+++ /dev/null
@@ -1,127 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
-import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
-import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
-import org.apache.hive.service.rpc.thrift.TGetTablesReq;
-import org.apache.impala.authorization.User;
-import org.apache.impala.common.ImpalaException;
-import org.apache.impala.common.Pair;
-import org.apache.impala.service.Frontend;
-import org.apache.impala.service.MetadataOp;
-import org.apache.impala.thrift.TMetadataOpRequest;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.thrift.TException;
-
-/**
- * A wrapper around some of Hive's Metastore API's to abstract away differences
- * between major versions of Hive. This implements the shimmed methods for Hive 2.
- */
-public class MetastoreShim {
-  /**
-   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
-   */
-  public static boolean validateName(String name) {
-    return MetaStoreUtils.validateName(name, null);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
-   * arguments.
-   */
-  public static void alterPartition(IMetaStoreClient client, Partition partition)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(
-        partition.getDbName(), partition.getTableName(), partition, null);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
-   * arguments.
-   */
-  public static void alterPartitions(IMetaStoreClient client, String dbName,
-      String tableName, List<Partition> partitions)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partitions(dbName, tableName, partitions, null);
-  }
-
-  /**
-   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
-   * arguments.
-   */
-  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
-      throws MetaException {
-    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
-  }
-
-  /**
-   * Return the maximum number of Metastore objects that should be retrieved in
-   * a batch.
-   */
-  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
-    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
-  }
-
-  /**
-   * Return the key and value that should be set in the partition parameters to
-   * mark that the stats were generated automatically by a stats task.
-   */
-  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
-    return Pair.create(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
-  }
-
-  public static TResultSet execGetFunctions(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetFunctionsReq req = request.getGet_functions_req();
-    return MetadataOp.getFunctions(
-        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
-  }
-
-  public static TResultSet execGetColumns(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetColumnsReq req = request.getGet_columns_req();
-    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getColumnName(), user);
-  }
-
-  public static TResultSet execGetTables(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetTablesReq req = request.getGet_tables_req();
-    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getTableTypes(), user);
-  }
-
-  public static TResultSet execGetSchemas(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetSchemasReq req = request.getGet_schemas_req();
-    return MetadataOp.getSchemas(
-        frontend, req.getCatalogName(), req.getSchemaName(), user);
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
deleted file mode 100644
index 9f9c36c..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-/**
- * Constant to tell us what Minicluster Profile we are built against.
- */
-public class MiniclusterProfile {
-  public static final int MINICLUSTER_PROFILE = 3;
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
deleted file mode 100644
index f85e890..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.util;
-
-import java.util.Set;
-
-import org.apache.sentry.core.common.exception.SentryAccessDeniedException;
-import org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
-import org.apache.sentry.core.common.exception.SentryGroupNotFoundException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-// See IMPALA-5540. Sentry over-shades itself (to avoid leaking Thrift),
-// causing this unusual package name. In the code below, we typically
-// check for either variant when it's available in the classpath.
-import sentry.org.apache.sentry.core.common.exception.SentryUserException;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-public class SentryUtil {
-  static boolean isSentryAlreadyExists(Exception e) {
-    return e instanceof SentryAlreadyExistsException || e instanceof
-      sentry.org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
-  }
-
-  static boolean isSentryAccessDenied(Exception e) {
-    return e instanceof SentryAccessDeniedException || e instanceof
-      sentry.org.apache.sentry.core.common.exception.SentryAccessDeniedException;
-  }
-
-  public static boolean isSentryGroupNotFound(Exception e) {
-    return e instanceof SentryGroupNotFoundException;
-  }
-
-  static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
-      throws SentryUserException {
-    return client.listAllRoles(username);
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java b/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
deleted file mode 100644
index bd39839..0000000
--- a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
+++ /dev/null
@@ -1,132 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.impala.authorization;
-
-import com.google.common.collect.Lists;
-import org.apache.impala.authorization.Privilege.ImpalaAction;
-import org.apache.sentry.core.common.BitFieldAction;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Random;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-
-public class ImpalaActionFactoryTest {
-  @Test
-  public void testGetActionsByCode() {
-    ImpalaActionFactory factory = new ImpalaActionFactory();
-
-    List<? extends BitFieldAction> actual = factory.getActionsByCode(
-        ImpalaAction.SELECT.getCode() |
-        ImpalaAction.INSERT.getCode() |
-        ImpalaAction.CREATE.getCode());
-    List<ImpalaAction> expected = Lists.newArrayList(
-        ImpalaAction.SELECT,
-        ImpalaAction.INSERT,
-        ImpalaAction.CREATE);
-    assertBitFieldActions(expected, actual);
-
-    actual = factory.getActionsByCode(
-        ImpalaAction.SELECT.getCode() |
-        ImpalaAction.INSERT.getCode() |
-        ImpalaAction.ALTER.getCode() |
-        ImpalaAction.CREATE.getCode() |
-        ImpalaAction.DROP.getCode() |
-        ImpalaAction.REFRESH.getCode());
-    expected = Lists.newArrayList(
-         ImpalaAction.SELECT,
-         ImpalaAction.INSERT,
-         ImpalaAction.ALTER,
-         ImpalaAction.CREATE,
-         ImpalaAction.DROP,
-         ImpalaAction.REFRESH,
-         ImpalaAction.ALL);
-     assertBitFieldActions(expected, actual);
-
-    actual = factory.getActionsByCode(ImpalaAction.ALL.getCode());
-    expected = Lists.newArrayList(
-        ImpalaAction.SELECT,
-        ImpalaAction.INSERT,
-        ImpalaAction.ALTER,
-        ImpalaAction.CREATE,
-        ImpalaAction.DROP,
-        ImpalaAction.REFRESH,
-        ImpalaAction.ALL);
-    assertBitFieldActions(expected, actual);
-
-    try {
-      factory.getActionsByCode(Integer.MAX_VALUE);
-      fail("IllegalArgumentException should be thrown.");
-    } catch (IllegalArgumentException e) {
-      assertEquals(String.format("Action code must between 1 and %d.",
-          ImpalaAction.ALL.getCode()), e.getMessage());
-    }
-
-    try {
-      factory.getActionsByCode(Integer.MIN_VALUE);
-      fail("IllegalArgumentException should be thrown.");
-    } catch (IllegalArgumentException e) {
-      assertEquals(String.format("Action code must between 1 and %d.",
-          ImpalaAction.ALL.getCode()), e.getMessage());
-    }
-  }
-
-  private static void assertBitFieldActions(List<ImpalaAction> expected,
-      List<? extends BitFieldAction> actual) {
-    assertEquals(expected.size(), actual.size());
-    for (int i = 0; i < actual.size(); i++) {
-      assertEquals(expected.get(i).getValue(), actual.get(i).getValue());
-      assertEquals(expected.get(i).getCode(), actual.get(i).getActionCode());
-    }
-  }
-
-  @Test
-  public void testGetActionByName() {
-    ImpalaActionFactory impala = new ImpalaActionFactory();
-
-    for (ImpalaAction action : ImpalaAction.values()) {
-      testGetActionByName(impala, action, action.getValue());
-    }
-    assertNull(impala.getActionByName("foo"));
-  }
-
-  private static void testGetActionByName(ImpalaActionFactory impala,
-      ImpalaAction expected, String name) {
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(name.toUpperCase()));
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(name.toLowerCase()));
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(randomizeCaseSensitivity(name)));
-  }
-
-  private static String randomizeCaseSensitivity(String str) {
-    char[] chars = str.toCharArray();
-    Random random = new Random(System.currentTimeMillis());
-    for (int i = 0; i < chars.length; i++) {
-      chars[i] = (random.nextBoolean()) ? Character.toUpperCase(chars[i]) : chars[i];
-    }
-    return new String(chars);
-  }
-
-  private static BitFieldAction toBitFieldAction(ImpalaAction action) {
-    return new BitFieldAction(action.getValue(), action.getCode());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
deleted file mode 100644
index 85f8510..0000000
--- a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
+++ /dev/null
@@ -1,164 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.datagenerator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.impala.planner.HBaseScanNode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * Deterministically assign regions to region servers.
- */
-public class HBaseTestDataRegionAssignment {
-  public class TableNotFoundException extends Exception {
-    public TableNotFoundException(String s) {
-      super(s);
-    }
-  }
-
-  private final static Logger LOG = LoggerFactory.getLogger(
-      HBaseTestDataRegionAssignment.class);
-  private final Configuration conf;
-  private Connection connection = null;
-  private final Admin admin;
-  private final List<ServerName> sortedRS; // sorted list of region server name
-  private final String[] splitPoints = { "1", "3", "5", "7", "9"};
-
-  private final static int REGION_MOVE_TIMEOUT_MILLIS = 60000;
-
-  public HBaseTestDataRegionAssignment() throws IOException {
-    conf = new Configuration();
-    connection = ConnectionFactory.createConnection(conf);
-    admin = connection.getAdmin();
-    ClusterStatus clusterStatus = admin.getClusterStatus();
-    List<ServerName> regionServerNames =
-        new ArrayList<ServerName>(clusterStatus.getServers());
-    ServerName master = clusterStatus.getMaster();
-    regionServerNames.remove(master);
-    sortedRS = new ArrayList<ServerName>(regionServerNames);
-    Collections.sort(sortedRS);
-  }
-
-  public void close() throws IOException {
-    admin.close();
-  }
-
-  /**
-   * The table comes in already split into regions specified by splitPoints and with data
-   * already loaded. Pair up adjacent regions and assign to the same server.
-   * Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
-   * will be on the same server.
-   */
-  public void performAssignment(String tableName) throws IOException,
-    InterruptedException, TableNotFoundException {
-    TableName table = TableName.valueOf(tableName);
-    if (!admin.tableExists(table)) {
-      throw new TableNotFoundException("Table " + tableName + " not found.");
-    }
-
-    // Sort the region by start key
-    List<RegionInfo> regions = admin.getRegions(table);
-    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
-    Collections.sort(regions, RegionInfo.COMPARATOR);
-    // Pair up two adjacent regions to the same region server. That is,
-    // region server 1 <- regions (unbound:1), (1:3)
-    // region server 2 <- regions (3:5), (5:7)
-    // region server 3 <- regions (7:9), (9:unbound)
-    HashMap<String, ServerName> expectedLocs = Maps.newHashMap();
-    for (int i = 0; i < regions.size(); ++i) {
-      RegionInfo regionInfo = regions.get(i);
-      int rsIdx = (i / 2) % sortedRS.size();
-      ServerName regionServerName = sortedRS.get(rsIdx);
-      LOG.info("Moving " + regionInfo.getRegionNameAsString() +
-               " to " + regionServerName.getAddress());
-      admin.move(regionInfo.getEncodedNameAsBytes(),
-          regionServerName.getServerName().getBytes());
-      expectedLocs.put(regionInfo.getRegionNameAsString(), regionServerName);
-    }
-
-    // admin.move() is an asynchronous operation. Wait for the move to complete.
-    // It should be done in 60 sec.
-    long start = System.currentTimeMillis();
-    long timeout = System.currentTimeMillis() + REGION_MOVE_TIMEOUT_MILLIS;
-    while (true) {
-      int matched = 0;
-      List<Pair<RegionInfo, ServerName>> pairs =
-          MetaTableAccessor.getTableRegionsAndLocations(connection, table);
-      Preconditions.checkState(pairs.size() == regions.size());
-      for (Pair<RegionInfo, ServerName> pair: pairs) {
-        RegionInfo regionInfo = pair.getFirst();
-        String regionName = regionInfo.getRegionNameAsString();
-        ServerName serverName = pair.getSecond();
-        Preconditions.checkNotNull(expectedLocs.get(regionName));
-        LOG.info(regionName + " " + HBaseScanNode.printKey(regionInfo.getStartKey()) +
-            " -> " +  serverName.getAddress().toString() + ", expecting " +
-            expectedLocs.get(regionName));
-        if (expectedLocs.get(regionName).equals(serverName)) {
-           ++matched;
-           continue;
-        }
-      }
-      if (matched == regions.size()) {
-        long elapsed = System.currentTimeMillis() - start;
-        LOG.info("Regions moved after " + elapsed + " millis.");
-        break;
-      }
-      if (System.currentTimeMillis() < timeout) {
-        Thread.sleep(100);
-        continue;
-      }
-      throw new IllegalStateException(
-          String.format("Failed to assign regions to servers after " +
-            REGION_MOVE_TIMEOUT_MILLIS + " millis."));
-    }
-
-    // Force a major compaction such that the HBase table is backed by deterministic
-    // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
-    // these physical artifacts.
-    LOG.info("Major compacting HBase table: " + tableName);
-    admin.majorCompact(table);
-  }
-}
-
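
For readers following the assignment logic above: the pairing is plain integer
arithmetic. Region i lands on server (i / 2) % numServers, so adjacent regions
share a server and the mapping wraps once the region pairs outnumber the
servers. A standalone sketch of just that mapping (illustrative, not part of
the patch):

    public class RegionPairingSketch {
      public static void main(String[] args) {
        // Six regions over three servers: regions (0,1)->s0, (2,3)->s1, (4,5)->s2.
        int numServers = 3;
        for (int i = 0; i < 6; i++) {
          System.out.println("region " + i + " -> server " + ((i / 2) % numServers));
        }
      }
    }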

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java b/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
new file mode 100644
index 0000000..8c9bff8
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
@@ -0,0 +1,341 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.schema.OriginalType;
+import org.apache.parquet.schema.PrimitiveType;
+
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+
+/**
+ * Provides extractParquetSchema() to extract a schema
+ * from a parquet file.
+ *
+ * Because Parquet's Java package changed between Parquet 1.5
+ * and 1.9, a second copy of this file, with "org.apache.parquet." replaced
+ * with "org.apache.org.apache.parquet.", is generated by the build system.
+ */
+class ParquetHelper {
+  private final static String ERROR_MSG =
+      "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
+
+  /**
+   * Reads the first block from the given HDFS file and returns the Parquet schema.
+   * Throws an AnalysisException for any failure, such as failing to read the file
+   * or failing to parse the contents.
+   */
+  private static org.apache.parquet.schema.MessageType loadParquetSchema(Path pathToFile)
+      throws AnalysisException {
+    try {
+      FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
+      if (!fs.isFile(pathToFile)) {
+        throw new AnalysisException("Cannot infer schema, path is not a file: " +
+                                    pathToFile);
+      }
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to connect to filesystem: " + e);
+    } catch (IllegalArgumentException e) {
+      throw new AnalysisException(e.getMessage());
+    }
+    ParquetMetadata readFooter = null;
+    try {
+      readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
+          pathToFile);
+    } catch (FileNotFoundException e) {
+      throw new AnalysisException("File not found: " + e);
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to open file as a parquet file: " + e);
+    } catch (RuntimeException e) {
+      // Parquet throws a generic RuntimeException when reading a non-parquet file
+      if (e.toString().contains("is not a Parquet file")) {
+        throw new AnalysisException("File is not a parquet file: " + pathToFile);
+      }
+      // otherwise, who knows what we caught, throw it back up
+      throw e;
+    }
+    return readFooter.getFileMetaData().getSchema();
+  }
+
+  /**
+   * Converts a "primitive" Parquet type to an Impala type.
+   * A primitive type is a non-nested type with no annotations.
+   */
+  private static Type convertPrimitiveParquetType(org.apache.parquet.schema.Type parquetType)
+      throws AnalysisException {
+    Preconditions.checkState(parquetType.isPrimitive());
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    switch (prim.getPrimitiveTypeName()) {
+      case BINARY: return Type.STRING;
+      case BOOLEAN: return Type.BOOLEAN;
+      case DOUBLE: return Type.DOUBLE;
+      case FIXED_LEN_BYTE_ARRAY:
+        throw new AnalysisException(
+            "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
+                parquetType.getName());
+      case FLOAT: return Type.FLOAT;
+      case INT32: return Type.INT;
+      case INT64: return Type.BIGINT;
+      case INT96: return Type.TIMESTAMP;
+      default:
+        Preconditions.checkState(false, "Unexpected parquet primitive type: " +
+               prim.getPrimitiveTypeName());
+        return null;
+    }
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala map Type. We support both standard
+   * Parquet map representations, as well as legacy. Legacy representations are handled
+   * according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
+   *
+   * Standard representation of a map in Parquet:
+   * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
+ *   repeated group key_value {
+   *     required <key-type> key;
+   *     <optional | required> <value-type> value;
+   *   }
+   * }
+   */
+  private static MapType convertMap(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have exactly 1 inner field."));
+    }
+
+    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have a repeated inner field."));
+    }
+    if (innerField.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must be a group."));
+    }
+
+    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    // It does not matter whether innerGroup has an annotation or not (for example it may
+    // be annotated with MAP_KEY_VALUE). We treat an annotated innerGroup the same as
+    // an unannotated one.
+    if (innerGroup.getFieldCount() != 2) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
+    }
+
+    org.apache.parquet.schema.Type key = innerGroup.getType(0);
+    if (!key.getName().equals("key")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the first field of the inner field of the logical MAP " +
+          "type must be 'key'"));
+    }
+    if (!key.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The key type of the logical MAP type must be primitive."));
+    }
+    org.apache.parquet.schema.Type value = innerGroup.getType(1);
+    if (!value.getName().equals("value")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the second field of the inner field of the logical MAP " +
+          "type must be 'value'"));
+    }
+
+    return new MapType(convertParquetType(key), convertParquetType(value));
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala struct Type.
+   */
+  private static StructType convertStruct(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    ArrayList<StructField> structFields = new ArrayList<StructField>();
+    for (org.apache.parquet.schema.Type field: outerGroup.getFields()) {
+      StructField f = new StructField(field.getName(), convertParquetType(field));
+      structFields.add(f);
+    }
+    return new StructType(structFields);
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala array Type. We can handle the standard
+   * representation, but also legacy representations for backwards compatibility.
+   * Legacy representations are handled according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
+   *
+   * Standard representation of an array in Parquet:
+   * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
+   *   repeated group list {
+   *     <optional | required> <element-type> element;
+   *   }
+   * }
+   */
+  private static ArrayType convertArray(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The logical LIST type must have exactly 1 inner field."));
+    }
+
+    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The inner field of the logical LIST type must be repeated."));
+    }
+    if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
+      // From the Parquet Spec:
+      // 1. If the repeated field is not a group then its type is the element type.
+      //
+      // If innerField is a group, but originalType is not null, the element type is
+      // based on the logical type.
+      return new ArrayType(convertParquetType(innerField));
+    }
+
+    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    if (innerGroup.getFieldCount() != 1) {
+      // From the Parquet Spec:
+      // 2. If the repeated field is a group with multiple fields, then its type is a
+      //    struct.
+      return new ArrayType(convertStruct(innerGroup));
+    }
+
+    return new ArrayType(convertParquetType(innerGroup.getType(0)));
+  }
+
+  /**
+   * Converts a "logical" Parquet type to an Impala column type.
+   * A Parquet type is considered logical when it has an annotation. The annotation is
+   * stored as a "OriginalType". The Parquet documentation refers to these as logical
+   * types, so we use that terminology here.
+   */
+  private static Type convertLogicalParquetType(org.apache.parquet.schema.Type parquetType)
+      throws AnalysisException {
+    OriginalType orig = parquetType.getOriginalType();
+    if (orig == OriginalType.LIST) {
+      return convertArray(parquetType.asGroupType());
+    }
+    if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
+      // MAP_KEY_VALUE annotation should not be used any more. However, according to the
+      // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
+      // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
+      // contained by a MAP-annotated group should be handled as a MAP-annotated group.
+      return convertMap(parquetType.asGroupType());
+    }
+
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
+        (orig == OriginalType.UTF8 || orig == OriginalType.ENUM)) {
+      // UTF8 is the type annotation Parquet uses for strings
+      // ENUM is the type annotation Parquet uses to indicate that
+      // the original data type, before conversion to Parquet, was an enum.
+      // Applications which do not have enumerated types (e.g. Impala)
+      // should interpret it as a string.
+      // We check to make sure it applies to BINARY to avoid errors if there is a bad
+      // annotation.
+      return Type.STRING;
+    }
+
+    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT32
+        || prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT64) {
+      // Map signed integer types to a supported Impala column type
+      switch (orig) {
+        case INT_8: return Type.TINYINT;
+        case INT_16: return Type.SMALLINT;
+        case INT_32: return Type.INT;
+        case INT_64: return Type.BIGINT;
+      }
+    }
+
+    if (orig == OriginalType.DECIMAL) {
+      return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
+                                           prim.getDecimalMetadata().getScale());
+    }
+
+    throw new AnalysisException(
+        "Unsupported logical parquet type " + orig + " (primitive type is " +
+            prim.getPrimitiveTypeName().name() + ") for field " +
+            parquetType.getName());
+  }
+
+  /**
+   * Converts a Parquet type into an Impala type.
+   */
+  private static Type convertParquetType(org.apache.parquet.schema.Type field)
+      throws AnalysisException {
+    Type type = null;
+    // TODO for 2.3: If a field is not annotated with LIST, it can sometimes still be
+    // interpreted as an array. The following two examples should be interpreted as an array
+    // of integers, but this is currently not done.
+    // 1. repeated int int_col;
+    // 2. required group int_arr {
+    //      repeated group list {
+    //        required int element;
+    //      }
+    //    }
+    if (field.getOriginalType() != null) {
+      type = convertLogicalParquetType(field);
+    } else if (field.isPrimitive()) {
+      type = convertPrimitiveParquetType(field);
+    } else {
+      // If field is not primitive, it must be a struct.
+      type = convertStruct(field.asGroupType());
+    }
+    return type;
+  }
+
+  /**
+   * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
+   * This fails with an analysis exception if any errors occur reading the file,
+   * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
+   */
+  static List<ColumnDef> extractParquetSchema(HdfsUri location)
+      throws AnalysisException {
+    org.apache.parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
+    List<org.apache.parquet.schema.Type> fields = parquetSchema.getFields();
+    List<ColumnDef> schema = new ArrayList<ColumnDef>();
+
+    for (org.apache.parquet.schema.Type field: fields) {
+      Type type = convertParquetType(field);
+      Preconditions.checkNotNull(type);
+      String colName = field.getName();
+      Map<ColumnDef.Option, Object> option = Maps.newHashMap();
+      option.put(ColumnDef.Option.COMMENT, "Inferred from Parquet file.");
+      schema.add(new ColumnDef(colName, new TypeDef(type), option));
+    }
+    return schema;
+  }
+}
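
A hedged sketch of how the helper above can be driven; extractParquetSchema()
feeds schema inference (CREATE TABLE LIKE PARQUET). The driver class and URI
are illustrative, HdfsUri's String constructor is assumed, and the caller sits
in org.apache.impala.analysis because extractParquetSchema() is package-private:

    package org.apache.impala.analysis;  // extractParquetSchema() is package-private

    import java.util.List;
    import org.apache.impala.common.AnalysisException;

    // Illustrative driver (not part of the patch). Example mappings per the
    // conversion rules above:
    //   required int32 id            -> INT
    //   optional binary name (UTF8)  -> STRING
    //   optional int64 ts            -> BIGINT
    class ParquetSchemaSketch {
      static void printInferredSchema(String uri) throws AnalysisException {
        List<ColumnDef> schema = ParquetHelper.extractParquetSchema(new HdfsUri(uri));
        for (ColumnDef col : schema) System.out.println(col);
      }
    }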

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
new file mode 100644
index 0000000..c3ef004
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.authorization;
+
+import com.google.common.base.Preconditions;
+import org.apache.impala.authorization.Privilege.ImpalaAction;
+import org.apache.sentry.core.common.BitFieldAction;
+import org.apache.sentry.core.common.BitFieldActionFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * An implementation of BitFieldActionFactory for Impala.
+ */
+public class ImpalaActionFactory extends BitFieldActionFactory {
+  @Override
+  public List<? extends BitFieldAction> getActionsByCode(int actionCode) {
+    Preconditions.checkArgument(
+        actionCode >= 1 && actionCode <= ImpalaAction.ALL.getCode(),
+        String.format("Action code must between 1 and %d.", ImpalaAction.ALL.getCode()));
+
+    List<BitFieldAction> actions = new ArrayList<>();
+    for (ImpalaAction action : ImpalaAction.values()) {
+      if ((action.getCode() & actionCode) == action.getCode()) {
+        actions.add(action.getBitFieldAction());
+      }
+    }
+    return actions;
+  }
+
+  @Override
+  public BitFieldAction getActionByName(String name) {
+    Preconditions.checkNotNull(name);
+
+    for (ImpalaAction action : ImpalaAction.values()) {
+      if (action.getValue().equalsIgnoreCase(name)) {
+        return action.getBitFieldAction();
+      }
+    }
+    return null;
+  }
+}
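
getActionsByCode() is a containment test over bit patterns: an action is
returned exactly when all of its bits are present in the requested code, so
ALL only appears once every underlying bit is set. A small illustrative driver
(placed in the same package, since ImpalaAction's visibility is not shown in
this hunk):

    package org.apache.impala.authorization;

    import java.util.List;
    import org.apache.impala.authorization.Privilege.ImpalaAction;
    import org.apache.sentry.core.common.BitFieldAction;

    // Illustrative driver (not part of the patch).
    public class ActionDecodeSketch {
      public static void main(String[] args) {
        ImpalaActionFactory factory = new ImpalaActionFactory();
        // SELECT|INSERT decodes back to exactly those two actions; ALL is
        // excluded because its full bit pattern is not contained in the code.
        int code = ImpalaAction.SELECT.getCode() | ImpalaAction.INSERT.getCode();
        List<? extends BitFieldAction> actions = factory.getActionsByCode(code);
        for (BitFieldAction action : actions) {
          System.out.println(action.getValue() + " = " + action.getActionCode());
        }
      }
    }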

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
new file mode 100644
index 0000000..43a194e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.authorization;
+
+import java.util.Map;
+
+import org.apache.sentry.core.common.BitFieldActionFactory;
+import org.apache.sentry.core.common.ImplyMethodType;
+import org.apache.sentry.core.model.db.HivePrivilegeModel;
+import org.apache.sentry.core.common.Model;
+
+/**
+ * Delegates to HivePrivilegeModel for getImplyMethodMap(), but
+ * uses Impala's BitFieldActionFactory implementation.
+ */
+public class ImpalaPrivilegeModel implements Model {
+  public static final ImpalaPrivilegeModel INSTANCE = new ImpalaPrivilegeModel();
+  private final ImpalaActionFactory actionFactory = new ImpalaActionFactory();
+
+  @Override
+  public Map<String, ImplyMethodType> getImplyMethodMap() {
+    return HivePrivilegeModel.getInstance().getImplyMethodMap();
+  }
+
+  @Override
+  public BitFieldActionFactory getBitFieldActionFactory() {
+    return actionFactory;
+  }
+}
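
A short sketch of what the delegation buys: Sentry pulls both pieces through
the Model interface, so Hive's imply rules and Impala's action parsing travel
together. Illustrative only:

    import org.apache.impala.authorization.ImpalaPrivilegeModel;
    import org.apache.sentry.core.common.Model;

    // Illustrative driver (not part of the patch).
    public class PrivilegeModelSketch {
      public static void main(String[] args) {
        Model model = ImpalaPrivilegeModel.INSTANCE;
        // Imply-method rules are Hive's; action-name parsing is Impala's.
        System.out.println(model.getImplyMethodMap().keySet());
        System.out.println(model.getBitFieldActionFactory().getActionByName("select"));
      }
    }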

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
new file mode 100644
index 0000000..a4f0743
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.impala.catalog.AuthorizationPolicy;
+
+import org.apache.commons.lang.reflect.ConstructorUtils;
+import org.apache.sentry.policy.engine.common.CommonPolicyEngine;
+import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
+import org.apache.sentry.provider.common.ProviderBackend;
+import org.apache.sentry.provider.common.ProviderBackendContext;
+import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.SimpleFileProviderBackend;
+
+/**
+ * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
+ */
+class SentryAuthProvider {
+  /**
+   * Creates a new ResourceAuthorizationProvider based on the given configuration.
+   */
+  static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
+      AuthorizationPolicy policy) {
+    try {
+      ProviderBackend providerBe;
+      // Create the appropriate backend provider.
+      if (config.isFileBasedPolicy()) {
+        providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
+            config.getPolicyFile());
+        ProviderBackendContext context = new ProviderBackendContext();
+        providerBe.initialize(context);
+      } else {
+        // Note: The second parameter to the ProviderBackend is a "resourceFile" path
+        // which is not used by Impala. We cannot pass 'null' so instead pass an empty
+        // string.
+        providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
+            "");
+        Preconditions.checkNotNull(policy);
+        ProviderBackendContext context = new ProviderBackendContext();
+        context.setBindingHandle(policy);
+        providerBe.initialize(context);
+      }
+
+      CommonPolicyEngine engine =
+          new CommonPolicyEngine(providerBe);
+
+      // Try to create an instance of the specified policy provider class.
+      // Re-throw any exceptions that are encountered.
+      String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
+
+      return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
+          Class.forName(config.getPolicyProviderClassName()),
+          new Object[] {policyFile, engine, ImpalaPrivilegeModel.INSTANCE});
+    } catch (Exception e) {
+      // Re-throw as unchecked exception.
+      throw new IllegalStateException(
+          "Error creating ResourceAuthorizationProvider: ", e);
+    }
+  }
+}
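
The reflective call above assumes the configured provider class exposes a
(String, PolicyEngine, Model) constructor, the shape Sentry's stock providers
are expected to match. Spelled out for one hypothetical class name (the name
is an example, not something this patch prescribes):

    import org.apache.commons.lang.reflect.ConstructorUtils;
    import org.apache.impala.authorization.ImpalaPrivilegeModel;
    import org.apache.sentry.policy.common.PolicyEngine;
    import org.apache.sentry.provider.common.ResourceAuthorizationProvider;

    // Illustrative equivalent of the invokeConstructor() call above.
    class ProviderConstructionSketch {
      static ResourceAuthorizationProvider construct(PolicyEngine engine)
          throws Exception {
        Class<?> clazz = Class.forName(
            "org.apache.sentry.provider.common.HadoopGroupResourceAuthorizationProvider");
        return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
            clazz, new Object[] {"", engine, ImpalaPrivilegeModel.INSTANCE});
      }
    }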

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/compat/HdfsShim.java b/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
new file mode 100644
index 0000000..9453f80
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * Wrapper classes to abstract away differences between HDFS versions in
+ * the MiniCluster profiles.
+ */
+public class HdfsShim {
+  public static boolean isErasureCoded(FileStatus fileStatus) {
+    return fileStatus.isErasureCoded();
+  }
+}
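
A minimal usage sketch of the shim, keeping call sites off the FileStatus API
directly; the main() wiring is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.impala.compat.HdfsShim;

    // Illustrative caller (not part of the patch).
    public class ErasureCodingSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        FileStatus status = fs.getFileStatus(new Path(args[0]));
        // Callers ask the shim rather than FileStatus directly.
        System.out.println("erasure coded: " + HdfsShim.isErasureCoded(status));
      }
    }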

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
new file mode 100644
index 0000000..3d69545
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.service.MetadataOp;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.thrift.TException;
+
+/**
+ * A wrapper around some of Hive's Metastore APIs to abstract away differences
+ * between major versions of Hive. This implements the shimmed methods for Hive 2.
+ */
+public class MetastoreShim {
+  /**
+   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
+   */
+  public static boolean validateName(String name) {
+    return MetaStoreUtils.validateName(name, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
+   * arguments.
+   */
+  public static void alterPartition(IMetaStoreClient client, Partition partition)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(
+        partition.getDbName(), partition.getTableName(), partition, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
+   * arguments.
+   */
+  public static void alterPartitions(IMetaStoreClient client, String dbName,
+      String tableName, List<Partition> partitions)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions(dbName, tableName, partitions, null);
+  }
+
+  /**
+   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
+   * arguments.
+   */
+  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
+      throws MetaException {
+    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
+  }
+
+  /**
+   * Return the maximum number of Metastore objects that should be retrieved in
+   * a batch.
+   */
+  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
+    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
+  }
+
+  /**
+   * Return the key and value that should be set in the partition parameters to
+   * mark that the stats were generated automatically by a stats task.
+   */
+  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
+    return Pair.create(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+  }
+
+  public static TResultSet execGetFunctions(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetFunctionsReq req = request.getGet_functions_req();
+    return MetadataOp.getFunctions(
+        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
+  }
+
+  public static TResultSet execGetColumns(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetColumnsReq req = request.getGet_columns_req();
+    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getColumnName(), user);
+  }
+
+  public static TResultSet execGetTables(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetTablesReq req = request.getGet_tables_req();
+    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getTableTypes(), user);
+  }
+
+  public static TResultSet execGetSchemas(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetSchemasReq req = request.getGet_schemas_req();
+    return MetadataOp.getSchemas(
+        frontend, req.getCatalogName(), req.getSchemaName(), user);
+  }
+}
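
The shim above is intended to be the only layer that knows about
version-specific Metastore signatures. A minimal caller sketch, assuming a
connected IMetaStoreClient and an existing Partition (the helper name is
hypothetical):

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.impala.compat.MetastoreShim;
import org.apache.thrift.TException;

public class MetastoreShimUsageSketch {
  // Route the version-sensitive alter_partition() call through the shim so
  // the extra argument added by newer Hive versions stays in one place.
  static void commitPartitionChange(IMetaStoreClient client, Partition part)
      throws TException {
    MetastoreShim.alterPartition(client, part);
  }
}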

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryUtil.java b/fe/src/main/java/org/apache/impala/util/SentryUtil.java
new file mode 100644
index 0000000..f85e890
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/util/SentryUtil.java
@@ -0,0 +1,54 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.util;
+
+import java.util.Set;
+
+import org.apache.sentry.core.common.exception.SentryAccessDeniedException;
+import org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
+import org.apache.sentry.core.common.exception.SentryGroupNotFoundException;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
+import org.apache.sentry.provider.db.service.thrift.TSentryRole;
+// See IMPALA-5540. Sentry over-shades itself (to avoid leaking Thrift),
+// causing this unusual package name. In the code below, we typically
+// check for both the shaded and unshaded variants, since either may be present.
+import sentry.org.apache.sentry.core.common.exception.SentryUserException;
+
+/**
+ * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
+ */
+public class SentryUtil {
+  static boolean isSentryAlreadyExists(Exception e) {
+    return e instanceof SentryAlreadyExistsException || e instanceof
+      sentry.org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
+  }
+
+  static boolean isSentryAccessDenied(Exception e) {
+    return e instanceof SentryAccessDeniedException || e instanceof
+      sentry.org.apache.sentry.core.common.exception.SentryAccessDeniedException;
+  }
+
+  public static boolean isSentryGroupNotFound(Exception e) {
+    return e instanceof SentryGroupNotFoundException;
+  }
+
+  static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
+      throws SentryUserException {
+    return client.listAllRoles(username);
+  }
+}
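
A sketch of the intended call pattern, assuming a caller in the same package
(the first two checks are package-private); the classify() helper is
hypothetical:

package org.apache.impala.util;

public class SentryErrorClassifierSketch {
  // Branch on a Sentry failure without naming either the shaded or the
  // unshaded exception class at the call site.
  static String classify(Exception e) {
    if (SentryUtil.isSentryAccessDenied(e)) return "ACCESS_DENIED";
    if (SentryUtil.isSentryAlreadyExists(e)) return "ALREADY_EXISTS";
    if (SentryUtil.isSentryGroupNotFound(e)) return "GROUP_NOT_FOUND";
    return "UNKNOWN";
  }
}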

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 2a45c3b..e0bd062 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -43,7 +43,6 @@ import org.apache.impala.common.FrontendTestBase;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.RuntimeEnv;
-import org.apache.impala.compat.MiniclusterProfile;
 import org.apache.impala.service.Frontend;
 import org.apache.impala.testutil.ImpaladTestCatalog;
 import org.apache.impala.thrift.TMetadataOpRequest;
@@ -824,13 +823,11 @@ public class AuthorizationTest extends FrontendTestBase {
   public void TestShortUsernameUsed() throws Exception {
     // Different long variations of the same username.
     List<User> users = Lists.newArrayList(
-        // Hadoop 2 accepts kerberos names missing a realm, but insists
-        // on having a terminating '@' even when the default realm
-        // is intended.  Hadoop 3 now has more normal name convetions,
-        // where to specify the default realm, everything after and
-        // including the '@' character is omitted.
-        new User(USER.getName() + "/abc.host.com" +
-          (MiniclusterProfile.MINICLUSTER_PROFILE == 3 ? "" : "@")),
+        // Historical note: Hadoop 2 accepted kerberos names missing a realm, but
+        // insisted on a terminating '@' even when the default realm was intended.
+        // Hadoop 3 follows the more conventional rule: to use the default realm,
+        // everything from the '@' character onward is simply omitted.
+        new User(USER.getName() + "/abc.host.com"),
         new User(USER.getName() + "/abc.host.com@REAL.COM"),
         new User(USER.getName() + "@REAL.COM"));
     for (User user: users) {
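
To make the expectation above concrete: all three principal spellings should
collapse to the same short username. A self-contained illustration of that
collapse (not Hadoop's actual auth_to_local rule engine, just the behavior the
test asserts):

public class ShortNameSketch {
  // Everything from the first '/' or '@' onward is dropped.
  static String shortName(String principal) {
    int cut = principal.length();
    for (char sep : new char[] {'/', '@'}) {
      int idx = principal.indexOf(sep);
      if (idx >= 0 && idx < cut) cut = idx;
    }
    return principal.substring(0, cut);
  }

  public static void main(String[] args) {
    // Each line prints "user".
    System.out.println(shortName("user/abc.host.com"));
    System.out.println(shortName("user/abc.host.com@REAL.COM"));
    System.out.println(shortName("user@REAL.COM"));
  }
}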

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java b/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
new file mode 100644
index 0000000..bd39839
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.authorization;
+
+import com.google.common.collect.Lists;
+import org.apache.impala.authorization.Privilege.ImpalaAction;
+import org.apache.sentry.core.common.BitFieldAction;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+public class ImpalaActionFactoryTest {
+  @Test
+  public void testGetActionsByCode() {
+    ImpalaActionFactory factory = new ImpalaActionFactory();
+
+    List<? extends BitFieldAction> actual = factory.getActionsByCode(
+        ImpalaAction.SELECT.getCode() |
+        ImpalaAction.INSERT.getCode() |
+        ImpalaAction.CREATE.getCode());
+    List<ImpalaAction> expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.CREATE);
+    assertBitFieldActions(expected, actual);
+
+    actual = factory.getActionsByCode(
+        ImpalaAction.SELECT.getCode() |
+        ImpalaAction.INSERT.getCode() |
+        ImpalaAction.ALTER.getCode() |
+        ImpalaAction.CREATE.getCode() |
+        ImpalaAction.DROP.getCode() |
+        ImpalaAction.REFRESH.getCode());
+    expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.ALTER,
+        ImpalaAction.CREATE,
+        ImpalaAction.DROP,
+        ImpalaAction.REFRESH,
+        ImpalaAction.ALL);
+    assertBitFieldActions(expected, actual);
+
+    actual = factory.getActionsByCode(ImpalaAction.ALL.getCode());
+    expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.ALTER,
+        ImpalaAction.CREATE,
+        ImpalaAction.DROP,
+        ImpalaAction.REFRESH,
+        ImpalaAction.ALL);
+    assertBitFieldActions(expected, actual);
+
+    try {
+      factory.getActionsByCode(Integer.MAX_VALUE);
+      fail("IllegalArgumentException should be thrown.");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format("Action code must between 1 and %d.",
+          ImpalaAction.ALL.getCode()), e.getMessage());
+    }
+
+    try {
+      factory.getActionsByCode(Integer.MIN_VALUE);
+      fail("IllegalArgumentException should be thrown.");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format("Action code must between 1 and %d.",
+          ImpalaAction.ALL.getCode()), e.getMessage());
+    }
+  }
+
+  private static void assertBitFieldActions(List<ImpalaAction> expected,
+      List<? extends BitFieldAction> actual) {
+    assertEquals(expected.size(), actual.size());
+    for (int i = 0; i < actual.size(); i++) {
+      assertEquals(expected.get(i).getValue(), actual.get(i).getValue());
+      assertEquals(expected.get(i).getCode(), actual.get(i).getActionCode());
+    }
+  }
+
+  @Test
+  public void testGetActionByName() {
+    ImpalaActionFactory impala = new ImpalaActionFactory();
+
+    for (ImpalaAction action : ImpalaAction.values()) {
+      testGetActionByName(impala, action, action.getValue());
+    }
+    assertNull(impala.getActionByName("foo"));
+  }
+
+  private static void testGetActionByName(ImpalaActionFactory impala,
+      ImpalaAction expected, String name) {
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(name.toUpperCase()));
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(name.toLowerCase()));
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(randomizeCaseSensitivity(name)));
+  }
+
+  private static String randomizeCaseSensitivity(String str) {
+    char[] chars = str.toCharArray();
+    Random random = new Random(System.currentTimeMillis());
+    for (int i = 0; i < chars.length; i++) {
+      chars[i] = (random.nextBoolean()) ? Character.toUpperCase(chars[i]) : chars[i];
+    }
+    return new String(chars);
+  }
+
+  private static BitFieldAction toBitFieldAction(ImpalaAction action) {
+    return new BitFieldAction(action.getValue(), action.getCode());
+  }
+}
\ No newline at end of file
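
The test above exercises decomposing a combined action code back into the
individual actions whose bits are set. A standalone sketch of that bit-field
pattern, with illustrative codes rather than Impala's actual values:

import java.util.ArrayList;
import java.util.List;

public class BitFieldDecodeSketch {
  enum Action {
    SELECT(1), INSERT(1 << 1), ALTER(1 << 2), CREATE(1 << 3),
    DROP(1 << 4), REFRESH(1 << 5), ALL(0x3f);
    final int code;
    Action(int code) { this.code = code; }
  }

  // An action matches when all of its bits are present in the mask; note
  // that OR-ing the six base actions together also matches ALL, mirroring
  // the expected lists in the test.
  static List<Action> decode(int combined) {
    List<Action> result = new ArrayList<>();
    for (Action a : Action.values()) {
      if ((combined & a.code) == a.code) result.add(a);
    }
    return result;
  }

  public static void main(String[] args) {
    // Prints [SELECT, INSERT, CREATE]
    System.out.println(decode(Action.SELECT.code | Action.INSERT.code
        | Action.CREATE.code));
  }
}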

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
index 64d743c..7b22551 100644
--- a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
+++ b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
@@ -54,7 +54,6 @@ import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
-import org.apache.impala.compat.MiniclusterProfile;
 import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.service.Frontend;
 import org.apache.impala.testutil.ImpaladTestCatalog;
@@ -409,15 +408,14 @@ public class FrontendTestBase {
       String errorString = e.getMessage();
       Preconditions.checkNotNull(errorString, "Stack trace lost during exception.");
       String msg = "got error:\n" + errorString + "\nexpected:\n" + expectedErrorString;
-      if (MiniclusterProfile.MINICLUSTER_PROFILE == 3) {
-        // Different versions of Hive have slightly different error messages;
-        // we normalize here as follows:
-        // 'No FileSystem for Scheme "x"' -> 'No FileSystem for scheme: x'
-        if (errorString.contains("No FileSystem for scheme ")) {
-          errorString = errorString.replace("\"", "");
-          errorString = errorString.replace("No FileSystem for scheme ",
-              "No FileSystem for scheme: ");
-        }
+      // TODO: this normalization can be removed now that only one Hive version
+      // is supported.
+      // Different versions of Hive have slightly different error messages;
+      // we normalize here as follows:
+      // 'No FileSystem for Scheme "x"' -> 'No FileSystem for scheme: x'
+      if (errorString.contains("No FileSystem for scheme ")) {
+        errorString = errorString.replace("\"", "");
+        errorString = errorString.replace("No FileSystem for scheme ",
+            "No FileSystem for scheme: ");
       }
       Assert.assertTrue(msg, errorString.startsWith(expectedErrorString));
       return;

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
new file mode 100644
index 0000000..85f8510
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
@@ -0,0 +1,164 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.datagenerator;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.impala.planner.HBaseScanNode;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+/**
+ * Deterministically assign regions to region servers.
+ */
+public class HBaseTestDataRegionAssignment {
+  public class TableNotFoundException extends Exception {
+    public TableNotFoundException(String s) {
+      super(s);
+    }
+  }
+
+  private final static Logger LOG = LoggerFactory.getLogger(
+      HBaseTestDataRegionAssignment.class);
+  private final Configuration conf;
+  private Connection connection = null;
+  private final Admin admin;
+  private final List<ServerName> sortedRS; // sorted list of region server names
+  private final String[] splitPoints = { "1", "3", "5", "7", "9"};
+
+  private final static int REGION_MOVE_TIMEOUT_MILLIS = 60000;
+
+  public HBaseTestDataRegionAssignment() throws IOException {
+    conf = new Configuration();
+    connection = ConnectionFactory.createConnection(conf);
+    admin = connection.getAdmin();
+    ClusterStatus clusterStatus = admin.getClusterStatus();
+    List<ServerName> regionServerNames =
+        new ArrayList<ServerName>(clusterStatus.getServers());
+    ServerName master = clusterStatus.getMaster();
+    regionServerNames.remove(master);
+    sortedRS = new ArrayList<ServerName>(regionServerNames);
+    Collections.sort(sortedRS);
+  }
+
+  public void close() throws IOException {
+    admin.close();
+  }
+
+  /**
+   * The table comes in already split into regions specified by splitPoints and with data
+   * already loaded. Pair up adjacent regions and assign to the same server.
+   * Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
+   * will be on the same server.
+   */
+  public void performAssignment(String tableName) throws IOException,
+    InterruptedException, TableNotFoundException {
+    TableName table = TableName.valueOf(tableName);
+    if (!admin.tableExists(table)) {
+      throw new TableNotFoundException("Table " + tableName + " not found.");
+    }
+
+    // Sort the region by start key
+    List<RegionInfo> regions = admin.getRegions(table);
+    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
+    Collections.sort(regions, RegionInfo.COMPARATOR);
+    // Pair up two adjacent regions to the same region server. That is,
+    // region server 1 <- regions (unbound:1), (1:3)
+    // region server 2 <- regions (3:5), (5:7)
+    // region server 3 <- regions (7:9), (9:unbound)
+    HashMap<String, ServerName> expectedLocs = Maps.newHashMap();
+    for (int i = 0; i < regions.size(); ++i) {
+      RegionInfo regionInfo = regions.get(i);
+      int rsIdx = (i / 2) % sortedRS.size();
+      ServerName regionServerName = sortedRS.get(rsIdx);
+      LOG.info("Moving " + regionInfo.getRegionNameAsString() +
+               " to " + regionServerName.getAddress());
+      admin.move(regionInfo.getEncodedNameAsBytes(),
+          regionServerName.getServerName().getBytes());
+      expectedLocs.put(regionInfo.getRegionNameAsString(), regionServerName);
+    }
+
+    // admin.move() is an asynchronous operation. Wait for the move to complete.
+    // It should complete within REGION_MOVE_TIMEOUT_MILLIS (60 seconds).
+    long start = System.currentTimeMillis();
+    long timeout = System.currentTimeMillis() + REGION_MOVE_TIMEOUT_MILLIS;
+    while (true) {
+      int matched = 0;
+      List<Pair<RegionInfo, ServerName>> pairs =
+          MetaTableAccessor.getTableRegionsAndLocations(connection, table);
+      Preconditions.checkState(pairs.size() == regions.size());
+      for (Pair<RegionInfo, ServerName> pair: pairs) {
+        RegionInfo regionInfo = pair.getFirst();
+        String regionName = regionInfo.getRegionNameAsString();
+        ServerName serverName = pair.getSecond();
+        Preconditions.checkNotNull(expectedLocs.get(regionName));
+        LOG.info(regionName + " " + HBaseScanNode.printKey(regionInfo.getStartKey()) +
+            " -> " +  serverName.getAddress().toString() + ", expecting " +
+            expectedLocs.get(regionName));
+        if (expectedLocs.get(regionName).equals(serverName)) {
+          ++matched;
+          continue;
+        }
+      }
+      if (matched == regions.size()) {
+        long elapsed = System.currentTimeMillis() - start;
+        LOG.info("Regions moved after " + elapsed + " millis.");
+        break;
+      }
+      if (System.currentTimeMillis() < timeout) {
+        Thread.sleep(100);
+        continue;
+      }
+      throw new IllegalStateException(
+          String.format("Failed to assign regions to servers after %d millis.",
+            REGION_MOVE_TIMEOUT_MILLIS));
+    }
+
+    // Force a major compaction such that the HBase table is backed by deterministic
+    // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
+    // these physical artifacts.
+    LOG.info("Major compacting HBase table: " + tableName);
+    admin.majorCompact(table);
+  }
+}
+
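
A hypothetical driver for the class above; the table name is illustrative and
the real entry point is whatever the data-loading scripts invoke:

import org.apache.impala.datagenerator.HBaseTestDataRegionAssignment;

public class AssignRegionsSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestDataRegionAssignment assignment = new HBaseTestDataRegionAssignment();
    try {
      // Pins each pair of adjacent regions of the pre-split table to a
      // single region server, then major-compacts for a deterministic
      // physical layout.
      assignment.performAssignment("functional_hbase.alltypesagg");
    } finally {
      assignment.close();
    }
  }
}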

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/impala-parent/pom.xml
----------------------------------------------------------------------
diff --git a/impala-parent/pom.xml b/impala-parent/pom.xml
index d962b43..bd091ea 100644
--- a/impala-parent/pom.xml
+++ b/impala-parent/pom.xml
@@ -87,6 +87,14 @@ under the License.
       </snapshots>
     </repository>
     <repository>
+      <id>impala.cdh.repo</id>
+      <url>https://${env.CDH_DOWNLOAD_HOST}/build/cdh_components/${env.CDH_BUILD_NUMBER}/maven</url>
+      <name>Impala CDH Repository</name>
+      <snapshots>
+        <enabled>true</enabled>
+      </snapshots>
+    </repository>
+    <repository>
       <id>cloudera.thirdparty.repo</id>
       <url>https://repository.cloudera.com/content/repositories/third-party</url>
       <name>Cloudera Third Party Repository</name>
@@ -115,45 +123,4 @@ under the License.
     </pluginRepository>
   </pluginRepositories>
 
-  <profiles>
-    <profile>
-      <id>impala-mini-cluster-profile-2</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>2</value>
-        </property>
-      </activation>
-      <repositories>
-        <repository>
-          <id>cdh.snapshots.repo</id>
-          <url>https://repository.cloudera.com/content/repositories/snapshots</url>
-          <name>CDH Snapshots Repository</name>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-    </profile>
-    <profile>
-      <id>impala-mini-cluster-profile-3</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>3</value>
-        </property>
-      </activation>
-      <repositories>
-        <repository>
-          <id>impala.cdh.repo</id>
-          <url>https://${env.CDH_DOWNLOAD_HOST}/build/cdh_components/${env.CDH_BUILD_NUMBER}/maven</url>
-          <name>Impala CDH Repository</name>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-    </profile>
-  </profiles>
-
 </project>

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-hbase.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-hbase.sh b/testdata/bin/run-hbase.sh
index 264951a..1433073 100755
--- a/testdata/bin/run-hbase.sh
+++ b/testdata/bin/run-hbase.sh
@@ -36,9 +36,7 @@ cat > ${HBASE_CONF_DIR}/hbase-env.sh <<EOF
 export JAVA_HOME=${JAVA_HOME}
 export HBASE_LOG_DIR=${HBASE_LOGDIR}
 export HBASE_PID_DIR=${HBASE_LOGDIR}
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HBASE_CLASSPATH=${HADOOP_CLASSPATH}
-fi
+export HBASE_CLASSPATH=${HADOOP_CLASSPATH}
 export HBASE_HEAPSIZE=1g
 EOF
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-hive-server.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-hive-server.sh b/testdata/bin/run-hive-server.sh
index 3b2c83d..2b5a486 100755
--- a/testdata/bin/run-hive-server.sh
+++ b/testdata/bin/run-hive-server.sh
@@ -75,12 +75,8 @@ if [ ${ONLY_METASTORE} -eq 0 ]; then
   # Starts a HiveServer2 instance on the port specified by the HIVE_SERVER2_THRIFT_PORT
   # environment variable. HADOOP_HEAPSIZE should be set to at least 2048 to avoid OOM
   # when loading ORC tables like widerow.
-  if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-    HADOOP_HEAPSIZE="2048" hive --service hiveserver2 > ${LOGDIR}/hive-server2.out 2>&1 &
-  elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-    HADOOP_CLIENT_OPTS="-Xmx2048m -Dhive.log.file=hive-server2.log" hive \
+  HADOOP_CLIENT_OPTS="-Xmx2048m -Dhive.log.file=hive-server2.log" hive \
       --service hiveserver2 > ${LOGDIR}/hive-server2.out 2>&1 &
-  fi
 
   # Wait for the HiveServer2 service to come up because callers of this script
   # may rely on it being available.

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-mini-dfs.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-mini-dfs.sh b/testdata/bin/run-mini-dfs.sh
index aefa703..ea6c519 100755
--- a/testdata/bin/run-mini-dfs.sh
+++ b/testdata/bin/run-mini-dfs.sh
@@ -40,9 +40,6 @@ fi
 set +e
 $IMPALA_HOME/testdata/cluster/admin start_cluster
 if [[ $? != 0 ]]; then
-  # Don't issue Java version warning when not running Hadoop 3.
-  [[ $IMPALA_MINICLUSTER_PROFILE != 3 ]] && exit 1
-
   # Only issue Java version warning when running Java 7.
   $JAVA -version 2>&1 | grep -q 'java version "1.7' || exit 1
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
----------------------------------------------------------------------
diff --git a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
index 8cde0b6..51197c5 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
+++ b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
@@ -19,20 +19,14 @@ NODE_DIR="${NODE_DIR}"
 PID_DIR="$NODE_DIR/var/run"
 LOG_DIR="$NODE_DIR/var/log"
 
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HADOOP_PID_DIR=$NODE_DIR/var/run
-fi
 export HADOOP_CONF_DIR="$NODE_DIR/etc/hadoop/conf"
-if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-  export YARN_CONF_DIR="$HADOOP_CONF_DIR"
-fi
+export HADOOP_PID_DIR=$NODE_DIR/var/run
 
 # Mark each process so they can be killed if needed. This is a safety mechanism for
 # stopping the processes if the pid file has been removed for whatever reason.
 export HADOOP_OPTS+=" -D${KILL_CLUSTER_MARKER}"
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export YARN_OPTS+=" -D${KILL_CLUSTER_MARKER}"
-fi
+export YARN_OPTS+=" -D${KILL_CLUSTER_MARKER}"
+
 # This is for KMS.
 export CATALINA_OPTS+=" -D${KILL_CLUSTER_MARKER}"
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/cluster/node_templates/common/etc/init.d/yarn-common
----------------------------------------------------------------------
diff --git a/testdata/cluster/node_templates/common/etc/init.d/yarn-common b/testdata/cluster/node_templates/common/etc/init.d/yarn-common
index dc60971..9934307 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/yarn-common
+++ b/testdata/cluster/node_templates/common/etc/init.d/yarn-common
@@ -15,14 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 
-if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-  export YARN_LOG_DIR="$LOG_DIR/hadoop-yarn"
-  export YARN_ROOT_LOGGER="${YARN_ROOT_LOGGER:-DEBUG,RFA}"
-
-  export YARN_LOGFILE=$(basename $0).log
-elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HADOOP_LOG_DIR="$LOG_DIR/hadoop-yarn"
-  export HADOOP_ROOT_LOGGER="${HADOOP_ROOT_LOGGER:-DEBUG,RFA}"
-
-  export HADOOP_LOGFILE=$(basename $0).log
-fi
+export HADOOP_LOG_DIR="$LOG_DIR/hadoop-yarn"
+export HADOOP_ROOT_LOGGER="${HADOOP_ROOT_LOGGER:-DEBUG,RFA}"
+export HADOOP_LOGFILE=$(basename $0).log

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/pom.xml
----------------------------------------------------------------------
diff --git a/testdata/pom.xml b/testdata/pom.xml
index af43fe9..22bf270 100644
--- a/testdata/pom.xml
+++ b/testdata/pom.xml
@@ -183,27 +183,6 @@ under the License.
           <redirectTestOutputToFile>true</redirectTestOutputToFile>
         </configuration>
       </plugin>
-
-      <!-- Support different src dirs for different minicluster profiles. -->
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.5</version>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
     </plugins>
   </build>
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
index d35b4bf..f90d9b4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
@@ -76,10 +76,10 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col limit 10 offset 5
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ====
 ---- CREATE_VIEW
 # Test that creating a view in Impala with "NULLS FIRST/LAST" works when the nulls
@@ -90,7 +90,7 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col asc nulls last limit 10
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
 HIVE=SUCCESS
@@ -103,10 +103,10 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col desc nulls last limit 10
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ====
 ---- CREATE_VIEW
 # Test that exotic column names are quoted in

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/tests/common/environ.py
----------------------------------------------------------------------
diff --git a/tests/common/environ.py b/tests/common/environ.py
index fc863dc..b61da1b 100644
--- a/tests/common/environ.py
+++ b/tests/common/environ.py
@@ -181,6 +181,3 @@ def specific_build_type_timeout(
     timeout_val = default_timeout
   return timeout_val
 
-def is_hive_2():
-  """Returns True if IMPALA_MINICLUSTER_PROFILE in use provides Hive 2."""
-  return os.environ.get("IMPALA_MINICLUSTER_PROFILE", None) == "3"

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/tests/metadata/test_views_compatibility.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index 11227dd..f4987c3 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -22,7 +22,6 @@ import shlex
 from subprocess import call
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
-from tests.common.environ import is_hive_2
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfS3, SkipIfADLS, SkipIfIsilon, SkipIfLocal
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -254,8 +253,6 @@ class ViewCompatTestCase(object):
       component_value = components[2].upper()
       if component_value == 'SUCCESS':
         exp_res[components[0]] = True
-      elif component_value == 'SUCCESS_PROFILE_3_ONLY':
-        exp_res[components[0]] = is_hive_2()
       elif component_value == 'FAILURE':
         exp_res[components[0]] = False
       else: