Posted to commits@impala.apache.org by mi...@apache.org on 2018/07/16 15:53:54 UTC

[1/5] impala git commit: IMPALA-7288: Fix Codegen Crash in FinalizeModule() (Addendum)

Repository: impala
Updated Branches:
  refs/heads/master 3da2dc63f -> df78eaec0


IMPALA-7288: Fix Codegen Crash in FinalizeModule() (Addendum)

In addition to the previous fix for IMPALA-7288, this patch prevents
Impala from crashing when a code path generates a malformed
handcrafted function and then tries to finalize it. Ideally this
should never happen, since the code paths that generate handcrafted IR
should never produce a malformed function.
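For illustration, a minimal sketch (assumed names, not Impala's actual
code) of the pattern this addendum adopts: a function that fails
verification makes finalization return NULL, and the malformed function
is left in the module for FinalizeModule() to delete later, rather than
being erased on the spot.

  #include <llvm/IR/Function.h>
  #include <llvm/IR/Verifier.h>

  // Note: llvm::verifyFunction() returns true when it finds problems.
  llvm::Function* FinalizeHandcrafted(llvm::Function* fn) {
    // Do NOT erase the function here; module finalization cleans up
    // any function that was never successfully finalized.
    if (llvm::verifyFunction(*fn)) return nullptr;
    return fn;
  }

Callers must handle the NULL result and fall back to the interpreted
code path instead of crashing.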

Change-Id: Id09c6f59f677ba30145fb2081715f1a7d89fe20b
Reviewed-on: http://gerrit.cloudera.org:8080/10944
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/d3660118
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/d3660118
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/d3660118

Branch: refs/heads/master
Commit: d36601182cb1e2f3693d379877df930250897886
Parents: 3da2dc6
Author: Bikramjeet Vig <bi...@cloudera.com>
Authored: Fri Jul 13 13:07:54 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jul 14 00:20:03 2018 +0000

----------------------------------------------------------------------
 be/src/codegen/llvm-codegen.cc | 5 +----
 be/src/codegen/llvm-codegen.h  | 4 ++--
 2 files changed, 3 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/d3660118/be/src/codegen/llvm-codegen.cc
----------------------------------------------------------------------
diff --git a/be/src/codegen/llvm-codegen.cc b/be/src/codegen/llvm-codegen.cc
index 7fe4ec1..173dbb2 100644
--- a/be/src/codegen/llvm-codegen.cc
+++ b/be/src/codegen/llvm-codegen.cc
@@ -1009,10 +1009,7 @@ llvm::Function* LlvmCodeGen::CloneFunction(llvm::Function* fn) {
 
 llvm::Function* LlvmCodeGen::FinalizeFunction(llvm::Function* function) {
   SetCPUAttrs(function);
-  if (!VerifyFunction(function)) {
-    function->eraseFromParent(); // deletes function
-    return NULL;
-  }
+  if (!VerifyFunction(function)) return NULL;
   finalized_functions_.insert(function);
   if (FLAGS_dump_ir) {
     string fn_name = function->getName();

http://git-wip-us.apache.org/repos/asf/impala/blob/d3660118/be/src/codegen/llvm-codegen.h
----------------------------------------------------------------------
diff --git a/be/src/codegen/llvm-codegen.h b/be/src/codegen/llvm-codegen.h
index 53569ca..7e9da26 100644
--- a/be/src/codegen/llvm-codegen.h
+++ b/be/src/codegen/llvm-codegen.h
@@ -392,8 +392,8 @@ class LlvmCodeGen {
   /// passed to AddFunctionToJit() otherwise the functions will be deleted from the
   /// module when the module is finalized. Also, all loaded functions that need to be JIT
   /// compiled after modification also need to be finalized.
-  /// If the function does not verify, it will delete the function and return NULL,
-  /// otherwise, it returns the function object.
+  /// If the function does not verify, it returns NULL and the function will eventually
+  /// be deleted in FinalizeModule(), otherwise, it returns the function object.
   llvm::Function* FinalizeFunction(llvm::Function* function);
 
   /// Adds the function to be automatically jit compiled when the codegen object is

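To make the contract documented above concrete, a hedged caller-side
sketch (AddFunctionToJit()'s exact signature is an assumption here,
simplified for illustration):

  // Only finalized functions may be handed to AddFunctionToJit();
  // FinalizeFunction() returns NULL if verification fails.
  llvm::Function* fn = codegen->FinalizeFunction(handcrafted_fn);
  if (fn == NULL) return;  // skip codegen; fall back to interpretation
  codegen->AddFunctionToJit(fn, &jitted_fn_ptr);  // signature assumed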

[5/5] impala git commit: Revert "Revert "IMPALA-6988: Implement ALTER TABLE/VIEW SET OWNER""

Posted by mi...@apache.org.
Revert "Revert "IMPALA-6988: Implement ALTER TABLE/VIEW SET OWNER""

This reverts commit c01efd09679faaacfd5488fc7f4c1526a1af2f35.

Change-Id: I47ed9c6bb983120afdafadea8b65a9239cbf6d0f
Reviewed-on: http://gerrit.cloudera.org:8080/10947
Reviewed-by: Fredy Wijaya <fw...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/df78eaec
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/df78eaec
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/df78eaec

Branch: refs/heads/master
Commit: df78eaec0b42d7fa5d148238f40fa57fcfcb25b5
Parents: a203733
Author: Fredy Wijaya <fw...@cloudera.com>
Authored: Sun Jul 15 10:45:06 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sun Jul 15 21:50:23 2018 +0000

----------------------------------------------------------------------
 common/thrift/JniCatalog.thrift                 | 13 ++++
 fe/src/main/cup/sql-parser.cup                  | 36 +++++++++-
 .../analysis/AlterTableOrViewSetOwnerStmt.java  | 72 ++++++++++++++++++++
 .../impala/analysis/AlterTableSetOwnerStmt.java | 37 ++++++++++
 .../apache/impala/analysis/AlterTableStmt.java  |  2 +-
 .../impala/analysis/AlterViewSetOwnerStmt.java  | 37 ++++++++++
 .../impala/service/CatalogOpExecutor.java       | 16 ++++-
 .../apache/impala/analysis/AnalyzeDDLTest.java  | 38 +++++++++++
 .../impala/analysis/AuthorizationStmtTest.java  | 23 ++++++-
 .../org/apache/impala/analysis/ParserTest.java  | 26 +++++++
 tests/metadata/test_ddl.py                      | 22 ++++++
 tests/metadata/test_ddl_base.py                 | 19 ++++++
 12 files changed, 335 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/common/thrift/JniCatalog.thrift
----------------------------------------------------------------------
diff --git a/common/thrift/JniCatalog.thrift b/common/thrift/JniCatalog.thrift
index 1876138..4a8298b 100644
--- a/common/thrift/JniCatalog.thrift
+++ b/common/thrift/JniCatalog.thrift
@@ -104,6 +104,7 @@ enum TAlterTableType {
   SET_CACHED,
   RECOVER_PARTITIONS,
   SET_ROW_FORMAT,
+  SET_OWNER
 }
 
 // Parameters of CREATE DATABASE commands
@@ -317,6 +318,15 @@ struct TAlterTableSetLocationParams {
   2: optional list<CatalogObjects.TPartitionKeyValue> partition_spec
 }
 
+// Parameters for ALTER TABLE/VIEW SET OWNER commands.
+struct TAlterTableOrViewSetOwnerParams {
+  // The owner type.
+  1: required TOwnerType owner_type
+
+  // The owner name.
+  2: required string owner_name
+}
+
 // Parameters for updating the table and/or column statistics
 // of a table. Used by ALTER TABLE SET COLUMN STATS, and internally by
 // a COMPUTE STATS command.
@@ -397,6 +407,9 @@ struct TAlterTableParams {
 
   // Parameters for ALTER TABLE SET ROW FORMAT
   15: optional TAlterTableSetRowFormatParams set_row_format_params
+
+  // Parameters for ALTER TABLE/VIEW SET OWNER
+  16: optional TAlterTableOrViewSetOwnerParams set_owner_params
 }
 
 // Parameters of CREATE TABLE LIKE commands

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/cup/sql-parser.cup
----------------------------------------------------------------------
diff --git a/fe/src/main/cup/sql-parser.cup b/fe/src/main/cup/sql-parser.cup
index bd00ad4..94b69e4 100644
--- a/fe/src/main/cup/sql-parser.cup
+++ b/fe/src/main/cup/sql-parser.cup
@@ -1058,6 +1058,9 @@ alter_db_stmt ::=
   :}
   ;
 
+// In some places, the opt_partition_set is used to avoid conflicts even though
+// a partition clause does not make sense for this stmt. If a partition
+// is given, manually throw a parse error.
 alter_tbl_stmt ::=
   KW_ALTER KW_TABLE table_name:table replace_existing_cols_val:replace KW_COLUMNS
   LPAREN column_def_list:col_defs RPAREN
@@ -1107,9 +1110,7 @@ alter_tbl_stmt ::=
   | KW_ALTER KW_TABLE table_name:table opt_partition_set:partition KW_SET
     KW_COLUMN KW_STATS ident_or_default:col LPAREN properties_map:map RPAREN
   {:
-    // The opt_partition_set is used to avoid conflicts even though
-    // a partition clause does not make sense for this stmt. If a partition
-    // is given, manually throw a parse error.
+    // See above for special partition clause handling.
     if (partition != null) parser.parseError("set", SqlParserSymbols.KW_SET);
     RESULT = new AlterTableSetColumnStats(table, col, map);
   :}
@@ -1134,6 +1135,23 @@ alter_tbl_stmt ::=
   | KW_ALTER KW_TABLE table_name:table KW_ALTER opt_kw_column ident_or_default:col_name
     KW_DROP KW_DEFAULT
   {: RESULT = AlterTableAlterColStmt.createDropDefaultStmt(table, col_name); :}
+  | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET IDENT:owner_id
+    IDENT:user_id ident_or_default:user
+  {:
+    // See above for special partition clause handling.
+    if (partitions != null) parser.parseError("set", SqlParserSymbols.KW_SET);
+    parser.checkIdentKeyword("OWNER", owner_id);
+    parser.checkIdentKeyword("USER", user_id);
+    RESULT = new AlterTableSetOwnerStmt(table, new Owner(TOwnerType.USER, user));
+  :}
+  | KW_ALTER KW_TABLE table_name:table opt_partition_set:partitions KW_SET IDENT:owner_id
+    KW_ROLE ident_or_default:role
+  {:
+    // See above for special partition clause handling.
+    if (partitions != null) parser.parseError("set", SqlParserSymbols.KW_SET);
+    parser.checkIdentKeyword("OWNER", owner_id);
+    RESULT = new AlterTableSetOwnerStmt(table, new Owner(TOwnerType.ROLE, role));
+  :}
   ;
 
 table_property_type ::=
@@ -1877,6 +1895,18 @@ alter_view_stmt ::=
   {: RESULT = new AlterViewStmt(table, col_defs, view_def); :}
   | KW_ALTER KW_VIEW table_name:before_table KW_RENAME KW_TO table_name:new_table
   {: RESULT = new AlterTableOrViewRenameStmt(before_table, new_table, false); :}
+  | KW_ALTER KW_VIEW table_name:table KW_SET IDENT:owner_id IDENT:user_id
+    ident_or_default:user
+  {:
+    parser.checkIdentKeyword("OWNER", owner_id);
+    parser.checkIdentKeyword("USER", user_id);
+    RESULT = new AlterViewSetOwnerStmt(table, new Owner(TOwnerType.USER, user));
+  :}
+  | KW_ALTER KW_VIEW table_name:table KW_SET IDENT:owner_id KW_ROLE ident_or_default:role
+  {:
+    parser.checkIdentKeyword("OWNER", owner_id);
+    RESULT = new AlterViewSetOwnerStmt(table, new Owner(TOwnerType.ROLE, role));
+  :}
   ;
 
 cascade_val ::=

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewSetOwnerStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewSetOwnerStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewSetOwnerStmt.java
new file mode 100644
index 0000000..c508413
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableOrViewSetOwnerStmt.java
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import com.google.common.base.Preconditions;
+import org.apache.impala.authorization.Privilege;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.thrift.TAlterTableOrViewSetOwnerParams;
+import org.apache.impala.thrift.TAlterTableParams;
+import org.apache.impala.thrift.TAlterTableType;
+import org.apache.impala.util.MetaStoreUtil;
+
+/**
+ * A base class for ALTER TABLE/VIEW SET OWNER.
+ */
+public abstract class AlterTableOrViewSetOwnerStmt extends AlterTableStmt {
+  protected final Owner owner_;
+
+  public AlterTableOrViewSetOwnerStmt(TableName tableName, Owner owner) {
+    super(tableName);
+    Preconditions.checkNotNull(owner);
+    owner_ = owner;
+  }
+
+  @Override
+  public void analyze(Analyzer analyzer) throws AnalysisException {
+    String ownerName = owner_.getOwnerName();
+    if (ownerName.length() > MetaStoreUtil.MAX_OWNER_LENGTH) {
+      throw new AnalysisException(String.format("Owner name exceeds maximum length of " +
+          "%d characters. The given owner name has %d characters.",
+          MetaStoreUtil.MAX_OWNER_LENGTH, ownerName.length()));
+    }
+    tableName_ = analyzer.getFqTableName(tableName_);
+    TableRef tableRef = new TableRef(tableName_.toPath(), null, Privilege.ALTER);
+    tableRef = analyzer.resolveTableRef(tableRef);
+    Preconditions.checkNotNull(tableRef);
+    tableRef.analyze(analyzer);
+    validateType(tableRef);
+  }
+
+  /**
+   * Validates the type of the given TableRef.
+   */
+  protected abstract void validateType(TableRef tableRef) throws AnalysisException;
+
+  @Override
+  public TAlterTableParams toThrift() {
+    TAlterTableParams params = new TAlterTableParams();
+    params.setTable_name(tableName_.toThrift());
+    TAlterTableOrViewSetOwnerParams ownerParams = new TAlterTableOrViewSetOwnerParams();
+    ownerParams.setOwner_type(owner_.getOwnerType());
+    ownerParams.setOwner_name(owner_.getOwnerName());
+    params.setAlter_type(TAlterTableType.SET_OWNER);
+    params.setSet_owner_params(ownerParams);
+    return params;
+  }
+}

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/java/org/apache/impala/analysis/AlterTableSetOwnerStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableSetOwnerStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetOwnerStmt.java
new file mode 100644
index 0000000..5932aff
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableSetOwnerStmt.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import org.apache.impala.common.AnalysisException;
+
+/**
+ * Represents an ALTER TABLE tbl SET OWNER [USER|ROLE] owner statement.
+ */
+public class AlterTableSetOwnerStmt extends AlterTableOrViewSetOwnerStmt {
+  public AlterTableSetOwnerStmt(TableName tableName, Owner owner) {
+    super(tableName, owner);
+  }
+
+  @Override
+  protected void validateType(TableRef tableRef) throws AnalysisException {
+    if (tableRef instanceof InlineViewRef) {
+      throw new AnalysisException(String.format(
+          "ALTER TABLE not allowed on a view: %s", tableName_));
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index a173975..0089f0d 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -33,7 +33,7 @@ import com.google.common.base.Preconditions;
  * Base class for all ALTER TABLE statements.
  */
 public abstract class AlterTableStmt extends StatementBase {
-  protected final TableName tableName_;
+  protected TableName tableName_;
 
   // Set during analysis.
   protected FeTable table_;

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/java/org/apache/impala/analysis/AlterViewSetOwnerStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterViewSetOwnerStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterViewSetOwnerStmt.java
new file mode 100644
index 0000000..4e1fc52
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterViewSetOwnerStmt.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import org.apache.impala.common.AnalysisException;
+
+/**
+ * Represents an ALTER VIEW v SET OWNER [USER|ROLE] owner statement.
+ */
+public class AlterViewSetOwnerStmt extends AlterTableOrViewSetOwnerStmt {
+  public AlterViewSetOwnerStmt(TableName tableName, Owner owner) {
+    super(tableName, owner);
+  }
+
+  @Override
+  protected void validateType(TableRef tableRef) throws AnalysisException {
+    if (!(tableRef instanceof InlineViewRef)) {
+      throw new AnalysisException(String.format(
+          "ALTER VIEW not allowed on a table: %s", tableName_));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 554d2c3..39cc108 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -100,6 +100,7 @@ import org.apache.impala.thrift.TAlterTableAddReplaceColsParams;
 import org.apache.impala.thrift.TAlterTableAlterColParams;
 import org.apache.impala.thrift.TAlterTableDropColParams;
 import org.apache.impala.thrift.TAlterTableDropPartitionParams;
+import org.apache.impala.thrift.TAlterTableOrViewSetOwnerParams;
 import org.apache.impala.thrift.TAlterTableParams;
 import org.apache.impala.thrift.TAlterTableSetCachedParams;
 import org.apache.impala.thrift.TAlterTableSetFileFormatParams;
@@ -557,6 +558,11 @@ public class CatalogOpExecutor {
           alterTableRecoverPartitions(tbl);
           addSummary(response, "Partitions have been recovered.");
           break;
+        case SET_OWNER:
+          Preconditions.checkState(params.isSetSet_owner_params());
+          alterTableOrViewSetOwner(tbl, params.getSet_owner_params());
+          addSummary(response, "Updated table/view.");
+          break;
         default:
           throw new UnsupportedOperationException(
               "Unknown ALTER TABLE operation type: " + params.getAlter_type());
@@ -2799,6 +2805,14 @@ public class CatalogOpExecutor {
     }
   }
 
+  private void alterTableOrViewSetOwner(Table tbl, TAlterTableOrViewSetOwnerParams params)
+      throws ImpalaRuntimeException {
+    org.apache.hadoop.hive.metastore.api.Table msTbl = tbl.getMetaStoreTable().deepCopy();
+    msTbl.setOwner(params.owner_name);
+    msTbl.setOwnerType(PrincipalType.valueOf(params.owner_type.name()));
+    applyAlterTable(msTbl, true);
+  }
+
   /**
    * Create a new HMS Partition.
    */
@@ -3564,7 +3578,7 @@ public class CatalogOpExecutor {
       }
     }
     addDbToCatalogUpdate(db, response.result);
-    addSummary(response, "Updated database");
+    addSummary(response, "Updated database.");
   }
 
   private void addDbToCatalogUpdate(Db db, TCatalogUpdateResult result) {

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index c565b45..46b3787 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -4059,6 +4059,44 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     }
   }
 
+  @Test
+  public void TestAlterTableSetOwner() {
+    String[] ownerTypes = new String[]{"user", "role"};
+    for (String ownerType : ownerTypes) {
+      AnalyzesOk(String.format("alter table functional.alltypes set owner %s foo",
+          ownerType));
+      AnalysisError(String.format("alter table nodb.alltypes set owner %s foo",
+          ownerType), "Could not resolve table reference: 'nodb.alltypes'");
+      AnalysisError(String.format("alter table functional.notbl set owner %s foo",
+          ownerType), "Could not resolve table reference: 'functional.notbl'");
+      AnalysisError(String.format("alter table functional.alltypes set owner %s %s",
+          ownerType, buildLongOwnerName()), "Owner name exceeds maximum length of 128 " +
+          "characters. The given owner name has 133 characters.");
+      AnalysisError(String.format("alter table functional.alltypes_view " +
+          "set owner %s foo", ownerType), "ALTER TABLE not allowed on a view: " +
+          "functional.alltypes_view");
+    }
+  }
+
+  @Test
+  public void TestAlterViewSetOwner() {
+    String[] ownerTypes = new String[]{"user", "role"};
+    for (String ownerType : ownerTypes) {
+      AnalyzesOk(String.format("alter view functional.alltypes_view set owner %s foo",
+          ownerType));
+      AnalysisError(String.format("alter view nodb.alltypes set owner %s foo",
+          ownerType), "Could not resolve table reference: 'nodb.alltypes'");
+      AnalysisError(String.format("alter view functional.notbl set owner %s foo",
+          ownerType), "Could not resolve table reference: 'functional.notbl'");
+      AnalysisError(String.format("alter view functional.alltypes_view set owner %s %s",
+          ownerType, buildLongOwnerName()), "Owner name exceeds maximum length of 128 " +
+          "characters. The given owner name has 133 characters.");
+      AnalysisError(String.format("alter view functional.alltypes " +
+          "set owner %s foo", ownerType), "ALTER VIEW not allowed on a table: " +
+          "functional.alltypes");
+    }
+  }
+
   private static String buildLongOwnerName() {
     StringBuilder comment = new StringBuilder();
     for (int i = 0; i < MetaStoreUtil.MAX_OWNER_LENGTH + 5; i++) {

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
index 524aff3..52e0952 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationStmtTest.java
@@ -1652,7 +1652,9 @@ public class AuthorizationStmtTest extends FrontendTestBase {
             "delimited fields terminated by ' '"),
         authorize("alter table functional.alltypes add partition(year=1, month=1)"),
         authorize("alter table functional.alltypes drop partition(" +
-            "year=2009, month=1)")}) {
+            "year=2009, month=1)"),
+        authorize("alter table functional.alltypes set owner user foo_owner"),
+        authorize("alter table functional.alltypes set owner role foo_owner")}) {
       test.ok(onServer(TPrivilegeLevel.ALL))
           .ok(onServer(TPrivilegeLevel.ALTER))
           .ok(onDatabase("functional", TPrivilegeLevel.ALL))
@@ -1825,6 +1827,25 @@ public class AuthorizationStmtTest extends FrontendTestBase {
         .error(alterError("functional.alltypes_view"), onTable("functional",
             "alltypes_view", allExcept(TPrivilegeLevel.ALL, TPrivilegeLevel.ALTER)));
 
+    // Alter view set owner.
+    for (AuthzTest test: new AuthzTest[]{
+        authorize("alter view functional.alltypes_view set owner user foo_owner"),
+        authorize("alter view functional.alltypes_view set owner role foo_owner")}) {
+      test.ok(onServer(TPrivilegeLevel.ALL))
+          .ok(onServer(TPrivilegeLevel.ALTER))
+          .ok(onDatabase("functional", TPrivilegeLevel.ALL))
+          .ok(onDatabase("functional", TPrivilegeLevel.ALTER))
+          .ok(onTable("functional", "alltypes_view", TPrivilegeLevel.ALL))
+          .ok(onTable("functional", "alltypes_view", TPrivilegeLevel.ALTER))
+          .error(alterError("functional.alltypes_view"))
+          .error(alterError("functional.alltypes_view"), onServer(allExcept(
+              TPrivilegeLevel.ALL, TPrivilegeLevel.ALTER)))
+          .error(alterError("functional.alltypes_view"), onDatabase("functional", allExcept(
+              TPrivilegeLevel.ALL, TPrivilegeLevel.ALTER)))
+          .error(alterError("functional.alltypes_view"), onTable("functional",
+              "alltypes_view", allExcept(TPrivilegeLevel.ALL, TPrivilegeLevel.ALTER)));
+    }
+
     // Database does not exist.
     authorize("alter view nodb.alltypes_view as select 1")
         .error(alterError("nodb"))

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index 6059b8b..a86f467 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -3809,4 +3809,30 @@ public class ParserTest extends FrontendTestBase {
     ParserError("ALTER DATABASE SET OWNER ROLE foo");
     ParserError("ALTER DATABASE SET OWNER");
   }
+
+  @Test
+  public void TestAlterTableOrViewSetOwner() {
+    for (String type : new String[]{"TABLE", "VIEW"}) {
+      for (String valid : new String[]{"foo", "user", "owner"}) {
+        ParsesOk(String.format("ALTER %s %s SET OWNER USER %s", type, valid, valid));
+        ParsesOk(String.format("ALTER %s %s SET OWNER ROLE %s", type, valid, valid));
+      }
+
+      for (String invalid : new String[]{"'foo'", "''", "NULL"}) {
+        ParserError(String.format("ALTER %s %s SET OWNER ROLE %s", type, invalid, invalid));
+        ParserError(String.format("ALTER %s %s SET OWNER USER %s", type, invalid, invalid));
+      }
+
+      ParserError(String.format("ALTER %s tbl PARTITION(i=1) SET OWNER ROLE foo", type));
+      ParserError(String.format("ALTER %s tbl SET ABC USER foo", type));
+      ParserError(String.format("ALTER %s tbl SET ABC ROLE foo", type));
+      ParserError(String.format("ALTER %s tbl SET OWNER ABC foo", type));
+      ParserError(String.format("ALTER %s tbl SET OWNER USER", type));
+      ParserError(String.format("ALTER %s SET OWNER foo", type));
+      ParserError(String.format("ALTER %s SET OWNER USER foo", type));
+      ParserError(String.format("ALTER %s tbl SET OWNER ROLE", type));
+      ParserError(String.format("ALTER %s SET OWNER ROLE foo", type));
+      ParserError(String.format("ALTER %s SET OWNER", type));
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/tests/metadata/test_ddl.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_ddl.py b/tests/metadata/test_ddl.py
index 7d2b714..ff6a566 100644
--- a/tests/metadata/test_ddl.py
+++ b/tests/metadata/test_ddl.py
@@ -227,6 +227,28 @@ class TestDdlStatements(TestDdlBase):
     assert len(properties) == 1
     assert {'foo_role': 'ROLE'} == properties
 
+  def test_alter_table_set_owner(self, vector, unique_database):
+    table_name = "{0}.test_owner_tbl".format(unique_database)
+    self.client.execute("create table {0}(i int)".format(table_name))
+    self.client.execute("alter table {0} set owner user foo_user".format(table_name))
+    owner = self._get_table_or_view_owner(table_name)
+    assert ('foo_user', 'USER') == owner
+
+    self.client.execute("alter table {0} set owner role foo_role".format(table_name))
+    owner = self._get_table_or_view_owner(table_name)
+    assert ('foo_role', 'ROLE') == owner
+
+  def test_alter_view_set_owner(self, vector, unique_database):
+    view_name = "{0}.test_owner_tbl".format(unique_database)
+    self.client.execute("create view {0} as select 1".format(view_name))
+    self.client.execute("alter view {0} set owner user foo_user".format(view_name))
+    owner = self._get_table_or_view_owner(view_name)
+    assert ('foo_user', 'USER') == owner
+
+    self.client.execute("alter view {0} set owner role foo_role".format(view_name))
+    owner = self._get_table_or_view_owner(view_name)
+    assert ('foo_role', 'ROLE') == owner
+
   # There is a query in QueryTest/create-table that references nested types, which is not
   # supported if old joins and aggs are enabled. Since we do not get any meaningful
   # additional coverage by running a DDL test under the old aggs and joins, it can be

http://git-wip-us.apache.org/repos/asf/impala/blob/df78eaec/tests/metadata/test_ddl_base.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_ddl_base.py b/tests/metadata/test_ddl_base.py
index bc74e6e..a27aa1c 100644
--- a/tests/metadata/test_ddl_base.py
+++ b/tests/metadata/test_ddl_base.py
@@ -93,6 +93,18 @@ class TestDdlBase(ImpalaTestSuite):
         properties[row[1].rstrip()] = row[2].rstrip()
     return properties
 
+  def _get_property(self, property_name, name, is_db=False):
+    """Extracts a db/table property value from the output of DESCRIBE FORMATTED."""
+    result = self.client.execute("describe {0} formatted {1}".format(
+      "database" if is_db else "", name))
+    for row in result.data:
+      if property_name in row:
+        row = row.split('\t')
+        if row[1] == 'NULL':
+          break
+        return row[1].rstrip()
+    return None
+
   def _get_db_comment(self, db_name):
     """Extracts the DB comment from the output of DESCRIBE DATABASE"""
     result = self.client.execute("describe database {0}".format(db_name))
@@ -110,3 +122,10 @@ class TestDdlBase(ImpalaTestSuite):
       if len(cols) == 3:
         comments[cols[0].rstrip()] = cols[2].rstrip()
     return comments.get(col_name)
+
+
+  def _get_table_or_view_owner(self, table_name):
+    """Returns a tuple(owner, owner_type) for a given table name"""
+    owner_name = self._get_property("Owner:", table_name)
+    owner_type = self._get_property("OwnerType:", table_name)
+    return (owner_name, owner_type)


[2/5] impala git commit: IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2

Posted by mi...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/tests/query_test/test_partitioning.py
----------------------------------------------------------------------
diff --git a/tests/query_test/test_partitioning.py b/tests/query_test/test_partitioning.py
index 597e791..267d9e0 100644
--- a/tests/query_test/test_partitioning.py
+++ b/tests/query_test/test_partitioning.py
@@ -22,7 +22,6 @@ from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfS3, SkipIfADLS, SkipIfIsilon, SkipIfLocal
 from tests.common.test_dimensions import create_single_exec_option_dimension
-from tests.common.environ import is_hive_2
 
 # Tests to validate HDFS partitioning.
 class TestPartitioning(ImpalaTestSuite):
@@ -79,10 +78,7 @@ class TestPartitioning(ImpalaTestSuite):
     # List the partitions. Show table stats returns 1 row for each partition + 1 summary
     # row
     result = self.execute_query("show table stats %s" % full_name)
-    if is_hive_2():
-      assert len(result.data) == 2 + 1
-    else:
-      assert len(result.data) == 3 + 1
+    assert len(result.data) == 2 + 1
 
     # Verify Impala properly merges the results of the Hive metadata,
     # whether it be good (Hive 2) or bad (Hive 1).


[4/5] impala git commit: IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2

Posted by mi...@apache.org.
IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2

This patch removes the use of IMPALA_MINICLUSTER_PROFILE. The code
specific to IMPALA_MINICLUSTER_PROFILE=2 is removed, and the code for
IMPALA_MINICLUSTER_PROFILE=3 becomes the default. To keep the number of
code changes in this patch manageable, the shims are left untouched;
the shims for IMPALA_MINICLUSTER_PROFILE=3 simply become the default
implementation.

Testing:
- Ran core and exhaustive tests

Change-Id: Iba4a81165b3d2012dc04d4115454372c41e39f08
Reviewed-on: http://gerrit.cloudera.org:8080/10940
Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
Tested-by: Impala Public Jenkins <im...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/impala/commit/a203733f
Tree: http://git-wip-us.apache.org/repos/asf/impala/tree/a203733f
Diff: http://git-wip-us.apache.org/repos/asf/impala/diff/a203733f

Branch: refs/heads/master
Commit: a203733fac3e1e37df8abeee39a88d187153a8c5
Parents: d366011
Author: Fredy Wijaya <fw...@cloudera.com>
Authored: Thu Jul 12 17:01:13 2018 -0700
Committer: Impala Public Jenkins <im...@cloudera.com>
Committed: Sat Jul 14 01:03:18 2018 +0000

----------------------------------------------------------------------
 bin/create-test-configuration.sh                |  12 +-
 bin/impala-config.sh                            | 102 +----
 bin/jenkins/build-all-flag-combinations.sh      |  16 +-
 fe/pom.xml                                      | 450 +++++++------------
 .../service/rpc/thrift/TGetCatalogsReq.java     |  24 -
 .../hive/service/rpc/thrift/TGetColumnsReq.java |  24 -
 .../service/rpc/thrift/TGetFunctionsReq.java    |  25 --
 .../hive/service/rpc/thrift/TGetInfoReq.java    |  24 -
 .../hive/service/rpc/thrift/TGetSchemasReq.java |  24 -
 .../hive/service/rpc/thrift/TGetTablesReq.java  |  24 -
 .../authorization/SentryAuthProvider.java       |  74 ---
 .../java/org/apache/impala/compat/HdfsShim.java |  31 --
 .../org/apache/impala/compat/MetastoreShim.java | 127 ------
 .../impala/compat/MiniclusterProfile.java       |  25 --
 .../java/org/apache/impala/util/SentryUtil.java |  49 --
 .../HBaseTestDataRegionAssignment.java          | 139 ------
 .../apache/impala/analysis/ParquetHelper.java   | 341 --------------
 .../authorization/ImpalaActionFactory.java      |  57 ---
 .../authorization/ImpalaPrivilegeModel.java     |  43 --
 .../authorization/SentryAuthProvider.java       |  80 ----
 .../java/org/apache/impala/compat/HdfsShim.java |  30 --
 .../org/apache/impala/compat/MetastoreShim.java | 127 ------
 .../impala/compat/MiniclusterProfile.java       |  25 --
 .../java/org/apache/impala/util/SentryUtil.java |  54 ---
 .../authorization/ImpalaActionFactoryTest.java  | 132 ------
 .../HBaseTestDataRegionAssignment.java          | 164 -------
 .../apache/impala/analysis/ParquetHelper.java   | 341 ++++++++++++++
 .../authorization/ImpalaActionFactory.java      |  57 +++
 .../authorization/ImpalaPrivilegeModel.java     |  43 ++
 .../authorization/SentryAuthProvider.java       |  80 ++++
 .../java/org/apache/impala/compat/HdfsShim.java |  30 ++
 .../org/apache/impala/compat/MetastoreShim.java | 127 ++++++
 .../java/org/apache/impala/util/SentryUtil.java |  54 +++
 .../impala/analysis/AuthorizationTest.java      |  13 +-
 .../authorization/ImpalaActionFactoryTest.java  | 132 ++++++
 .../apache/impala/common/FrontendTestBase.java  |  18 +-
 .../HBaseTestDataRegionAssignment.java          | 164 +++++++
 impala-parent/pom.xml                           |  49 +-
 testdata/bin/run-hbase.sh                       |   4 +-
 testdata/bin/run-hive-server.sh                 |   6 +-
 testdata/bin/run-mini-dfs.sh                    |   3 -
 .../common/etc/init.d/common.tmpl               |  12 +-
 .../common/etc/init.d/yarn-common               |  14 +-
 testdata/pom.xml                                |  21 -
 .../queries/QueryTest/views-compatibility.test  |  10 +-
 tests/common/environ.py                         |   3 -
 tests/metadata/test_views_compatibility.py      |   3 -
 tests/query_test/test_partitioning.py           |   6 +-
 48 files changed, 1264 insertions(+), 2149 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/create-test-configuration.sh
----------------------------------------------------------------------
diff --git a/bin/create-test-configuration.sh b/bin/create-test-configuration.sh
index b6781c1..812154d 100755
--- a/bin/create-test-configuration.sh
+++ b/bin/create-test-configuration.sh
@@ -95,11 +95,7 @@ if [ $CREATE_METASTORE -eq 1 ]; then
   # Hive schema SQL scripts include other scripts using \i, which expects absolute paths.
   # Switch to the scripts directory to make this work.
   pushd ${HIVE_HOME}/scripts/metastore/upgrade/postgres
-  if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-    psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-1.1.0.postgres.sql
-  elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-    psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-2.1.1.postgres.sql
-  fi
+  psql -q -U hiveuser -d ${METASTORE_DB} -f hive-schema-2.1.1.postgres.sql
   popd
   # Increase the size limit of PARAM_VALUE from SERDE_PARAMS table to be able to create
   # HBase tables with large number of columns.
@@ -164,11 +160,7 @@ fi
 
 generate_config postgresql-hive-site.xml.template hive-site.xml
 generate_config log4j.properties.template log4j.properties
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  generate_config hive-log4j2.properties.template hive-log4j2.properties
-else
-  generate_config hive-log4j.properties.template hive-log4j.properties
-fi
+generate_config hive-log4j2.properties.template hive-log4j2.properties
 generate_config hbase-site.xml.template hbase-site.xml
 generate_config authz-policy.ini.template authz-policy.ini
 generate_config sentry-site.xml.template sentry-site.xml

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/impala-config.sh
----------------------------------------------------------------------
diff --git a/bin/impala-config.sh b/bin/impala-config.sh
index 771ca2b..0091cd3 100755
--- a/bin/impala-config.sh
+++ b/bin/impala-config.sh
@@ -157,67 +157,19 @@ fi
 export IMPALA_KUDU_VERSION=a954418
 unset IMPALA_KUDU_URL
 
-
-# Versions of Hadoop ecosystem dependencies.
-# ------------------------------------------
-# IMPALA_MINICLUSTER_PROFILE can have two values:
-# 2 represents:
-#    Hadoop 2.6
-#    HBase 1.2
-#    Hive 1.1
-#    Sentry 1.5
-#    Parquet 1.5
-#    Llama (used for Mini KDC) 1.0
-# 3 represents:
-#    Hadoop 3.0
-#    HBase 2.0
-#    Hive 2.1
-#    Sentry 2.0
-#    Parquet 1.9
-#
-# Impala 3.x defaults to profile 3 and marks profile 2 deprecated,
-# so that it may be removed in the 3.x line.
-
-DEFAULT_MINICLUSTER_PROFILE=3
-: ${IMPALA_MINICLUSTER_PROFILE_OVERRIDE:=$DEFAULT_MINICLUSTER_PROFILE}
-
 : ${CDH_DOWNLOAD_HOST:=native-toolchain.s3.amazonaws.com}
 export CDH_DOWNLOAD_HOST
-
-if [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 2 ]]; then
-  echo "IMPALA_MINICLUSTER_PROFILE=2 is deprecated and may be removed in Impala 3.x"
-
-  export IMPALA_MINICLUSTER_PROFILE=2
-  export CDH_MAJOR_VERSION=5
-  export CDH_BUILD_NUMBER=44
-  export IMPALA_HADOOP_VERSION=2.6.0-cdh5.16.0-SNAPSHOT
-  export IMPALA_HBASE_VERSION=1.2.0-cdh5.16.0-SNAPSHOT
-  export IMPALA_HIVE_VERSION=1.1.0-cdh5.16.0-SNAPSHOT
-  export IMPALA_SENTRY_VERSION=1.5.1-cdh5.16.0-SNAPSHOT
-  export IMPALA_PARQUET_VERSION=1.5.0-cdh5.16.0-SNAPSHOT
-  export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
-  export IMPALA_KITE_VERSION=1.0.0-cdh5.16.0-SNAPSHOT
-  # Kudu version used to identify Java client jar from maven
-  export KUDU_JAVA_VERSION=1.8.0-cdh5.16.0-SNAPSHOT
-  # IMPALA-6972: Temporarily disable Hive parallelism during dataload
-  # The Hive version used for IMPALA_MINICLUSTER_PROFIILE=2 has a concurrency issue
-  # that intermittent fails parallel dataload.
-  export IMPALA_SERIAL_DATALOAD=1
-
-elif [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 3 ]]; then
-  export IMPALA_MINICLUSTER_PROFILE=3
-  export CDH_MAJOR_VERSION=6
-  export CDH_BUILD_NUMBER=422770
-  export IMPALA_HADOOP_VERSION=3.0.0-cdh6.x-SNAPSHOT
-  export IMPALA_HBASE_VERSION=2.0.0-cdh6.x-SNAPSHOT
-  export IMPALA_HIVE_VERSION=2.1.1-cdh6.x-SNAPSHOT
-  export IMPALA_SENTRY_VERSION=2.0.0-cdh6.x-SNAPSHOT
-  export IMPALA_PARQUET_VERSION=1.9.0-cdh6.x-SNAPSHOT
-  export IMPALA_AVRO_JAVA_VERSION=1.8.2-cdh6.x-SNAPSHOT
-  export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
-  export IMPALA_KITE_VERSION=1.0.0-cdh6.x-SNAPSHOT
-  export KUDU_JAVA_VERSION=1.8.0-cdh6.x-SNAPSHOT
-fi
+export CDH_MAJOR_VERSION=6
+export CDH_BUILD_NUMBER=422770
+export IMPALA_HADOOP_VERSION=3.0.0-cdh6.x-SNAPSHOT
+export IMPALA_HBASE_VERSION=2.0.0-cdh6.x-SNAPSHOT
+export IMPALA_HIVE_VERSION=2.1.1-cdh6.x-SNAPSHOT
+export IMPALA_SENTRY_VERSION=2.0.0-cdh6.x-SNAPSHOT
+export IMPALA_PARQUET_VERSION=1.9.0-cdh6.x-SNAPSHOT
+export IMPALA_AVRO_JAVA_VERSION=1.8.2-cdh6.x-SNAPSHOT
+export IMPALA_LLAMA_MINIKDC_VERSION=1.0.0
+export IMPALA_KITE_VERSION=1.0.0-cdh6.x-SNAPSHOT
+export KUDU_JAVA_VERSION=1.8.0-cdh6.x-SNAPSHOT
 
 unset IMPALA_HADOOP_URL
 unset IMPALA_HBASE_URL
@@ -477,10 +429,6 @@ elif [ "${TARGET_FILESYSTEM}" = "local" ]; then
   export FILESYSTEM_PREFIX="${LOCAL_FS}"
 elif [ "${TARGET_FILESYSTEM}" = "hdfs" ]; then
   if [[ "${ERASURE_CODING}" = true ]]; then
-    if [[ "${IMPALA_MINICLUSTER_PROFILE}" -lt 3 ]]; then
-      echo "Hadoop 3 is required for HDFS erasure coding."
-      return 1
-    fi
     export HDFS_ERASURECODE_POLICY="RS-3-2-1024k"
     export HDFS_ERASURECODE_PATH="/test-warehouse"
   fi
@@ -548,14 +496,12 @@ export HADOOP_CLASSPATH="${HADOOP_CLASSPATH-}:${HADOOP_HOME}/share/hadoop/tools/
 export LZO_JAR_PATH="$HADOOP_LZO/build/hadoop-lzo-0.4.15.jar"
 HADOOP_CLASSPATH+=":$LZO_JAR_PATH"
 
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  # Beware of adding entries from $HADOOP_HOME here, because they can change
-  # the order of the classpath, leading to configuration not showing up first.
-  HADOOP_CLASSPATH="$LZO_JAR_PATH"
-  # Add the path containing the hadoop-aws jar, which is required to access AWS from the
-  # minicluster.
-  HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${HADOOP_HOME}/share/hadoop/tools/lib/*"
-fi
+# Beware of adding entries from $HADOOP_HOME here, because they can change
+# the order of the classpath, leading to configuration not showing up first.
+HADOOP_CLASSPATH="$LZO_JAR_PATH"
+# Add the path containing the hadoop-aws jar, which is required to access AWS from the
+# minicluster.
+HADOOP_CLASSPATH="${HADOOP_CLASSPATH}:${HADOOP_HOME}/share/hadoop/tools/lib/*"
 
 export MINI_DFS_BASE_DATA_DIR="$IMPALA_HOME/cdh-${CDH_MAJOR_VERSION}-hdfs-data"
 export PATH="$HADOOP_HOME/bin:$PATH"
@@ -724,18 +670,16 @@ else
       | sort | uniq`
 fi
 
-if [[ $IMPALA_MINICLUSTER_PROFILE_OVERRIDE == 3 ]]; then
-  # Check for minimum required Java version
-  # Only issue Java version warning when running Java 7.
-  if $JAVA -version 2>&1 | grep -q 'java version "1.7'; then
-    cat << EOF
+# Check for minimum required Java version
+# Only issue Java version warning when running Java 7.
+if $JAVA -version 2>&1 | grep -q 'java version "1.7'; then
+  cat << EOF
 
 WARNING: Your development environment is configured for Hadoop 3 and Java 7. Hadoop 3
 requires at least Java 8. Your JAVA binary currently points to $JAVA
 and reports the following version:
 
 EOF
-    $JAVA -version
-    echo
-  fi
+  $JAVA -version
+  echo
 fi

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/bin/jenkins/build-all-flag-combinations.sh
----------------------------------------------------------------------
diff --git a/bin/jenkins/build-all-flag-combinations.sh b/bin/jenkins/build-all-flag-combinations.sh
index 841b668..e6dfc1c 100755
--- a/bin/jenkins/build-all-flag-combinations.sh
+++ b/bin/jenkins/build-all-flag-combinations.sh
@@ -29,32 +29,22 @@ trap 'echo Error in $0 at line $LINENO: $(cd "'$PWD'" && awk "NR == $LINENO" $0)
 
 . bin/impala-config.sh
 
-# These are configurations for buildall, with a special sigil for
-# "minicluster profile" where appropriate.
+# These are configurations for buildall.
 CONFIGS=(
   # Test gcc builds with and without -so:
   "-skiptests -noclean"
-  "-skiptests -noclean -so -profile2"
   "-skiptests -noclean -release"
   "-skiptests -noclean -release -so -ninja"
   # clang sanitizer builds:
   "-skiptests -noclean -asan"
-  "-skiptests -noclean -ubsan -so -ninja -profile2"
   "-skiptests -noclean -tsan"
+  "-skiptests -noclean -ubsan -so -ninja"
 )
 
 FAILED=""
 
 for CONFIG in "${CONFIGS[@]}"; do
-  CONFIG2=${CONFIG/-profile2/}
-  if [[ "$CONFIG" != "$CONFIG2" ]]; then
-    CONFIG=$CONFIG2
-    export IMPALA_MINICLUSTER_PROFILE_OVERRIDE=2
-  else
-    export IMPALA_MINICLUSTER_PROFILE_OVERRIDE=3
-  fi
-
-  DESCRIPTION="Options $CONFIG and profile $IMPALA_MINICLUSTER_PROFILE_OVERRIDE"
+  DESCRIPTION="Options $CONFIG"
 
   if [[ $# == 1 && $1 == "--dryrun" ]]; then
     echo $DESCRIPTION

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/pom.xml
----------------------------------------------------------------------
diff --git a/fe/pom.xml b/fe/pom.xml
index 7e73242..70ff9cc 100644
--- a/fe/pom.xml
+++ b/fe/pom.xml
@@ -186,6 +186,18 @@ under the License.
     </dependency>
 
     <dependency>
+      <groupId>org.apache.sentry</groupId>
+      <artifactId>sentry-policy-engine</artifactId>
+      <version>${sentry.version}</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.parquet</groupId>
+      <artifactId>parquet-hadoop-bundle</artifactId>
+      <version>${parquet.version}</version>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
       <version>${hbase.version}</version>
@@ -258,6 +270,7 @@ under the License.
         </exclusion>
       </exclusions>
     </dependency>
+
     <dependency>
       <groupId>org.apache.hive</groupId>
       <artifactId>hive-serde</artifactId>
@@ -272,6 +285,109 @@ under the License.
     </dependency>
 
     <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-exec</artifactId>
+      <version>${hive.version}</version>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+        <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+        <exclusion>
+          <groupId>net.minidev</groupId>
+          <artifactId>json-smart</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-common</artifactId>
+      <version>${hive.version}</version>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+        <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+        <exclusion>
+          <groupId>net.minidev</groupId>
+          <artifactId>json-smart</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-jdbc</artifactId>
+      <version>${hive.version}</version>
+      <scope>test</scope>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>net.minidev</groupId>
+          <artifactId>json-smart</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-hbase-handler</artifactId>
+      <version>${hive.version}</version>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+        <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+        <exclusion>
+          <groupId>net.minidev</groupId>
+          <artifactId>json-smart</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-metastore</artifactId>
+      <version>${hive.version}</version>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+        <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
+        <exclusion>
+          <groupId>net.minidev</groupId>
+          <artifactId>json-smart</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hive.shims</groupId>
+      <artifactId>hive-shims-common</artifactId>
+      <version>${hive.version}</version>
+      <exclusions>
+        <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
+        <exclusion>
+          <groupId>org.apache.logging.log4j</groupId>
+          <artifactId>log4j-slf4j-impl</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
       <groupId>org.apache.kudu</groupId>
       <artifactId>kudu-client</artifactId>
       <version>${kudu.version}</version>
@@ -490,19 +606,6 @@ under the License.
                         -->
                 <source>${project.basedir}/generated-sources/gen-java</source>
                 <source>${project.build.directory}/generated-sources/cup</source>
-                <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/java</source>
-              </sources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>add-test-source</id>
-            <phase>generate-test-sources</phase>
-            <goals>
-              <goal>add-test-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/test/java</source>
               </sources>
             </configuration>
           </execution>
@@ -568,6 +671,46 @@ under the License.
         </configuration>
       </plugin>
 
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <version>3.0.0-M1</version>
+        <executions>
+          <execution>
+            <id>enforce-banned-dependencies</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <bannedDependencies>
+                  <excludes>
+                    <exclude>org.apache.logging.log4j:log4j-slf4j-impl</exclude>
+                    <!-- Assert that we only use artifacts from only the specified
+                         version of these components. -->
+                    <exclude>org.apache.hadoop:*</exclude>
+                    <exclude>org.apache.hbase:*</exclude>
+                    <exclude>org.apache.hive:*</exclude>
+                    <exclude>org.apache.kudu:*</exclude>
+                    <exclude>org.apache.sentry:*</exclude>
+                    <exclude>org.apache.parquet:*</exclude>
+                  </excludes>
+                  <includes>
+                    <include>org.apache.hadoop:*:${hadoop.version}</include>
+                    <include>org.apache.hbase:*:${hbase.version}</include>
+                    <include>org.apache.hive:*:${hive.version}</include>
+                    <include>org.apache.kudu:*:${kudu.version}</include>
+                    <include>org.apache.sentry:*:${sentry.version}</include>
+                    <include>org.apache.parquet:*:${parquet.version}</include>
+                  </includes>
+                </bannedDependencies>
+              </rules>
+              <fail>true</fail>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+
   </plugins>
     <pluginManagement>
       <plugins>
@@ -730,270 +873,23 @@ under the License.
         </plugins>
       </build>
     </profile>
+  </profiles>
 
-    <profile>
-      <id>impala-minicluster-profile-2</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>2</value>
-        </property>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.sentry</groupId>
-          <artifactId>sentry-policy-db</artifactId>
-          <version>${sentry.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>com.twitter</groupId>
-          <artifactId>parquet-hadoop-bundle</artifactId>
-          <version>${parquet.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-exec</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-common</artifactId>
-          <version>${hive.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-jdbc</artifactId>
-          <version>${hive.version}</version>
-          <scope>test</scope>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-hbase-handler</artifactId>
-          <version>${hive.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-metastore</artifactId>
-          <version>${hive.version}</version>
-        </dependency>
-      </dependencies>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
-            <version>1.8</version>
-            <executions>
-              <execution>
-                <id>generate-minicluster-profile-2-compat-shims</id>
-                <phase>generate-sources</phase>
-                <goals>
-                  <goal>run</goal>
-                </goals>
-                <configuration>
-                  <!-- Generates ParquetHelper for minicluster profile 2. -->
-                  <target>
-                    <echo file="${project.build.directory}/gen-compat-shims.sh">
-                      echo Generating shims for Minicluster Profile 2
-                      mkdir -p generated-sources/gen-java/org/apache/impala/analysis
-                      sed -e s,org.apache.parquet,parquet,g src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java > generated-sources/gen-java/org/apache/impala/analysis/ParquetHelper.java
-                    </echo>
-                    <exec executable="bash" dir="${project.basedir}" failonerror="true">
-                      <arg line="-ex ${project.build.directory}/gen-compat-shims.sh" />
-                    </exec>
-                  </target>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
+  <dependencyManagement>
+    <dependencies>
+      <!--
+      Pin org.glassfish:javax.el explicitly.
 
-    </profile>
-    <profile>
-      <id>impala-minicluster-profile-3</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>3</value>
-        </property>
-      </activation>
-      <dependencies>
-        <dependency>
-          <groupId>org.apache.sentry</groupId>
-          <artifactId>sentry-policy-engine</artifactId>
-          <version>${sentry.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.parquet</groupId>
-          <artifactId>parquet-hadoop-bundle</artifactId>
-          <version>${parquet.version}</version>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-exec</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-common</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-jdbc</artifactId>
-          <version>${hive.version}</version>
-          <scope>test</scope>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-hbase-handler</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive</groupId>
-          <artifactId>hive-metastore</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-            <!-- https://issues.apache.org/jira/browse/HADOOP-14903 -->
-            <exclusion>
-              <groupId>net.minidev</groupId>
-              <artifactId>json-smart</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>org.apache.hive.shims</groupId>
-          <artifactId>hive-shims-common</artifactId>
-          <version>${hive.version}</version>
-          <exclusions>
-            <!-- Impala uses log4j v1; avoid pulling in slf4j handling for log4j2 -->
-            <exclusion>
-              <groupId>org.apache.logging.log4j</groupId>
-              <artifactId>log4j-slf4j-impl</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-      </dependencies>
-      <build>
-        <plugins>
-          <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-enforcer-plugin</artifactId>
-            <version>3.0.0-M1</version>
-            <executions>
-              <execution>
-                <id>enforce-banned-dependencies</id>
-                <goals>
-                  <goal>enforce</goal>
-                </goals>
-                <configuration>
-                  <rules>
-                    <bannedDependencies>
-                      <excludes>
-                        <exclude>org.apache.logging.log4j:log4j-slf4j-impl</exclude>
-                        <!-- Assert that we use artifacts from only the
-                             specified versions of these components. -->
-                        <exclude>org.apache.hadoop:*</exclude>
-                        <exclude>org.apache.hbase:*</exclude>
-                        <exclude>org.apache.hive:*</exclude>
-                        <exclude>org.apache.kudu:*</exclude>
-                        <exclude>org.apache.sentry:*</exclude>
-                        <exclude>org.apache.parquet:*</exclude>
-                      </excludes>
-                      <includes>
-                        <include>org.apache.hadoop:*:${hadoop.version}</include>
-                        <include>org.apache.hbase:*:${hbase.version}</include>
-                        <include>org.apache.hive:*:${hive.version}</include>
-                        <include>org.apache.kudu:*:${kudu.version}</include>
-                        <include>org.apache.sentry:*:${sentry.version}</include>
-                        <include>org.apache.parquet:*:${parquet.version}</include>
-                      </includes>
-                    </bannedDependencies>
-                  </rules>
-                  <fail>true</fail>
-                </configuration>
-              </execution>
-            </executions>
-          </plugin>
-        </plugins>
-      </build>
-      <dependencyManagement>
-        <dependencies>
-          <!--
-          Pin org.glassfish:javax.el explicitly.
-
-          HBase depends on this indirectly, and it's configured with
-          a range of versions. This causes Maven to talk to all configured
-          repositories, leading both to a lot of chattiness, and also
-          failures if one of the repositories is unavailable.
-          -->
-          <dependency>
-            <groupId>org.glassfish</groupId>
-            <artifactId>javax.el</artifactId>
-            <version>3.0.1-b08</version>
-          </dependency>
-        </dependencies>
-      </dependencyManagement>
-    </profile>
-  </profiles>
+      HBase depends on this indirectly, and it's configured with
+      a range of versions. This causes Maven to talk to all configured
+      repositories, leading both to a lot of chattiness, and also
+      failures if one of the repositories is unavailable.
+      -->
+      <dependency>
+        <groupId>org.glassfish</groupId>
+        <artifactId>javax.el</artifactId>
+        <version>3.0.1-b08</version>
+      </dependency>
+    </dependencies>
+  </dependencyManagement>
 </project>
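
A note on how the maven-enforcer bannedDependencies rule above behaves:
<excludes> bans matching artifacts anywhere in the resolved dependency tree,
while <includes> carves out exceptions, so the net effect is to pin each
listed component to the single version the build supplies. A hand-worked
example (version numbers hypothetical):

    Suppose hadoop.version=3.0.0 and a transitive dependency drags in
    org.apache.hadoop:hadoop-annotations:2.6.0:

      exclude org.apache.hadoop:*        -> matches, so the artifact is banned
      include org.apache.hadoop:*:3.0.0  -> does not match 2.6.0, no exemption

    With <fail>true</fail> the build aborts until the stray 2.6.0 artifact is
    excluded or managed up to ${hadoop.version}.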

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
deleted file mode 100644
index 6a264bd..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetCatalogsReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetCatalogsReq extends org.apache.hive.service.cli.thrift.TGetCatalogsReq {}
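
This wrapper, and the five identical ones deleted below, re-exported Hive 1's
org.apache.hive.service.cli.thrift classes under the
org.apache.hive.service.rpc.thrift package names used by Hive 2, so shared
frontend code compiled against either profile. A minimal sketch of a call
site, assuming the hive-service jars are on the classpath (ShimDemo is an
illustrative name, not Impala code):

    import org.apache.hive.service.rpc.thrift.TGetCatalogsReq;

    public class ShimDemo {
      public static void main(String[] args) {
        // On profile 2 this resolved to the shim (hence the Hive 1 class);
        // on profile 3 it is Hive 2's own thrift class.
        TGetCatalogsReq req = new TGetCatalogsReq();
        System.out.println(req.getClass().getSuperclass());
      }
    }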

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
deleted file mode 100644
index b35819a..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetColumnsReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetColumnsReq extends org.apache.hive.service.cli.thrift.TGetColumnsReq {}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
deleted file mode 100644
index 63424eb..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetFunctionsReq.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetFunctionsReq
-    extends org.apache.hive.service.cli.thrift.TGetFunctionsReq {}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
deleted file mode 100644
index 708134d..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetInfoReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetInfoReq extends org.apache.hive.service.cli.thrift.TGetInfoReq {}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
deleted file mode 100644
index 3b6ec26..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetSchemasReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetSchemasReq extends org.apache.hive.service.cli.thrift.TGetSchemasReq {}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java b/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
deleted file mode 100644
index fd309d4..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/hive/service/rpc/thrift/TGetTablesReq.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.hive.service.rpc.thrift;
-
-/**
- * Wrapper to allow the same code to instantiate the equivalent classes from Hive 1 and
- * Hive 2 APIs.
- */
-public class TGetTablesReq extends org.apache.hive.service.cli.thrift.TGetTablesReq {}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
deleted file mode 100644
index 4793516..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/authorization/SentryAuthProvider.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-import org.apache.sentry.policy.db.SimpleDBPolicyEngine;
-import org.apache.commons.lang.reflect.ConstructorUtils;
-import org.apache.impala.catalog.AuthorizationPolicy;
-import org.apache.sentry.provider.file.SimpleFileProviderBackend;
-import org.apache.sentry.policy.common.PolicyEngine;
-import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackendContext;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across
- * Sentry versions.
- */
-class SentryAuthProvider {
-  /*
-   * Creates a new ResourceAuthorizationProvider based on the given configuration.
-   */
-  static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
-      AuthorizationPolicy policy) {
-    try {
-      ProviderBackend providerBe;
-      // Create the appropriate backend provider.
-      if (config.isFileBasedPolicy()) {
-        providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
-            config.getPolicyFile());
-      } else {
-        // Note: The second parameter to the ProviderBackend is a "resourceFile" path
-        // which is not used by Impala. We cannot pass 'null' so instead pass an empty
-        // string.
-        providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
-            "");
-        Preconditions.checkNotNull(policy);
-        ProviderBackendContext context = new ProviderBackendContext();
-        context.setBindingHandle(policy);
-        providerBe.initialize(context);
-      }
-
-      SimpleDBPolicyEngine engine =
-          new SimpleDBPolicyEngine(config.getServerName(), providerBe);
-
-      // Try to create an instance of the specified policy provider class.
-      // Re-throw any exceptions that are encountered.
-      String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
-      return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
-          Class.forName(config.getPolicyProviderClassName()),
-          new Object[] {policyFile, engine});
-    } catch (Exception e) {
-      // Re-throw as unchecked exception.
-      throw new IllegalStateException(
-          "Error creating ResourceAuthorizationProvider: ", e);
-    }
-  }
-}
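
The interesting trick above is the reflective construction of the policy
provider: the class is chosen by name from the configuration and instantiated
through commons-lang's ConstructorUtils. A self-contained sketch of that
idiom, using StringBuilder as a stand-in for the provider class (purely
illustrative):

    import org.apache.commons.lang.reflect.ConstructorUtils;

    public class ReflectDemo {
      public static void main(String[] args) throws Exception {
        // Pick the class by name at runtime and invoke a matching
        // constructor, as createProvider() does for the policy provider.
        Object o = ConstructorUtils.invokeConstructor(
            Class.forName("java.lang.StringBuilder"), new Object[] {"seed"});
        System.out.println(o);  // prints "seed"
      }
    }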

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
deleted file mode 100644
index ef3da61..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/HdfsShim.java
+++ /dev/null
@@ -1,31 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import org.apache.hadoop.fs.FileStatus;
-
-/**
- * Wrapper classes to abstract away differences between HDFS versions in
- * the MiniCluster profiles.
- */
-public class HdfsShim {
-  public static boolean isErasureCoded(FileStatus fileStatus) {
-    // Hadoop 2 didn't support Erasure Coding
-    return false;
-  }
-}
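
For contrast, a profile-3 counterpart of this shim can simply delegate, since
Hadoop 3 added FileStatus#isErasureCoded(). A sketch, assuming hadoop-common
3.x on the classpath:

    package org.apache.impala.compat;

    import org.apache.hadoop.fs.FileStatus;

    public class HdfsShim {
      public static boolean isErasureCoded(FileStatus fileStatus) {
        // Hadoop 3 exposes this directly on FileStatus.
        return fileStatus.isErasureCoded();
      }
    }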

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
deleted file mode 100644
index d0cd351..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MetastoreShim.java
+++ /dev/null
@@ -1,127 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
-import org.apache.impala.authorization.User;
-import org.apache.impala.common.Pair;
-import org.apache.impala.common.ImpalaException;
-import org.apache.impala.service.Frontend;
-import org.apache.impala.service.MetadataOp;
-import org.apache.impala.thrift.TMetadataOpRequest;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.thrift.TException;
-
-/**
- * A wrapper around some of Hive's Metastore API's to abstract away differences
- * between major versions of Hive. This implements the shimmed methods for Hive 2.
- */
-public class MetastoreShim {
-  /**
-   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
-   */
-  public static boolean validateName(String name) {
-    return MetaStoreUtils.validateName(name);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
-   * arguments.
-   */
-  public static void alterPartition(IMetaStoreClient client, Partition partition)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(partition.getDbName(), partition.getTableName(), partition);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
-   * arguments.
-   */
-  public static void alterPartitions(IMetaStoreClient client, String dbName,
-      String tableName, List<Partition> partitions)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partitions(dbName, tableName, partitions);
-  }
-
-  /**
-   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
-   * arguments.
-   */
-  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
-      throws MetaException {
-    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
-  }
-
-  /**
-   * Return the maximum number of Metastore objects that should be retrieved in
-   * a batch.
-   */
-  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
-    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString();
-  }
-
-  /**
-   * Return the key and value that should be set in the partition parameters to
-   * mark that the stats were generated automatically by a stats task.
-   */
-  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
-    return Pair.create(
-        StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
-  }
-
-  public static TResultSet execGetFunctions(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetFunctionsReq req = request.getGet_functions_req();
-    return MetadataOp.getFunctions(
-        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
-  }
-
-  public static TResultSet execGetColumns(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetColumnsReq req = request.getGet_columns_req();
-    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getColumnName(), user);
-  }
-
-  public static TResultSet execGetTables(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetTablesReq req = request.getGet_tables_req();
-    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getTableTypes(), user);
-  }
-
-  public static TResultSet execGetSchemas(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetSchemasReq req = request.getGet_schemas_req();
-    return MetadataOp.getSchemas(
-        frontend, req.getCatalogName(), req.getSchemaName(), user);
-  }
-}
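
The pattern throughout this class: Impala call sites use one stable
signature, and each profile's shim adapts it to the arguments its Hive
version expects. A self-contained toy version of the idea (all names
hypothetical, not Hive's real signatures):

    public final class ShimStyleDemo {
      // Pretend the underlying API gained an extra parameter in a newer release.
      static boolean validateNameV1(String name) { return name != null && !name.isEmpty(); }
      static boolean validateNameV2(String name, Object conf) { return validateNameV1(name); }

      // The shim pins a single signature for all call sites.
      public static boolean validateName(String name) {
        return validateNameV2(name, null);  // adapt to the newer API here
      }

      public static void main(String[] args) {
        System.out.println(validateName("my_table"));  // true
      }
    }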

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
deleted file mode 100644
index 330035e..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/compat/MiniclusterProfile.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-/**
- * Constant to tell us what Minicluster Profile we are built against.
- */
-public class MiniclusterProfile {
-  public static final int MINICLUSTER_PROFILE = 2;
-}
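
Shared code can branch on this constant when behavior must differ between the
stacks. A trivial usage sketch (the printout is illustrative):

    import org.apache.impala.compat.MiniclusterProfile;

    public class ProfileCheck {
      public static void main(String[] args) {
        // The value is fixed at build time by whichever compat source tree
        // was compiled in.
        System.out.println("Built against minicluster profile "
            + MiniclusterProfile.MINICLUSTER_PROFILE);
      }
    }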

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java b/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
deleted file mode 100644
index 5d2eadc..0000000
--- a/fe/src/compat-minicluster-profile-2/java/org/apache/impala/util/SentryUtil.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-package org.apache.impala.util;
-
-import java.util.Set;
-
-import org.apache.sentry.provider.db.SentryAccessDeniedException;
-import org.apache.sentry.provider.db.SentryAlreadyExistsException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-import org.apache.sentry.SentryUserException;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-public class SentryUtil {
-  static boolean isSentryAlreadyExists(Exception e) {
-    return e instanceof SentryAlreadyExistsException;
-  }
-
-  static boolean isSentryAccessDenied(Exception e) {
-    return e instanceof SentryAccessDeniedException;
-  }
-
-  public static boolean isSentryGroupNotFound(Exception e) {
-    // Sentry 1.5 does not have this exception
-    return false;
-  }
-
-  static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
-      throws SentryUserException {
-    return client.listRoles(username);
-  }
-}
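
Call sites classify failures through the shim instead of importing
version-specific Sentry exception types. A self-contained sketch of the idiom
with a stand-in exception class (not Sentry's):

    public class ClassifyDemo {
      static class AlreadyExistsException extends RuntimeException {}

      static boolean isAlreadyExists(Exception e) {
        return e instanceof AlreadyExistsException;
      }

      public static void main(String[] args) {
        try {
          throw new AlreadyExistsException();
        } catch (Exception e) {
          // Mirrors SentryUtil.isSentryAlreadyExists(): swallow "already
          // exists", treat anything else as a real failure.
          System.out.println(isAlreadyExists(e) ? "ignore: already exists" : "fail");
        }
      }
    }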

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
deleted file mode 100644
index f8c1ae9..0000000
--- a/fe/src/compat-minicluster-profile-2/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
+++ /dev/null
@@ -1,139 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.datagenerator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableMap;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.impala.planner.HBaseScanNode;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * Deterministically assigns regions to region servers.
- */
-public class HBaseTestDataRegionAssignment {
-  public class TableNotFoundException extends Exception {
-    public TableNotFoundException(String s) {
-      super(s);
-    }
-  }
-
-  private final static Logger LOG = LoggerFactory.getLogger(
-      HBaseTestDataRegionAssignment.class);
-  private final Configuration conf;
-  private final HBaseAdmin hbaseAdmin;
-  private final List<ServerName> sortedRS; // sorted list of region server names
-  private final String[] splitPoints = { "1", "3", "5", "7", "9"};
-
-  public HBaseTestDataRegionAssignment() throws IOException {
-    conf = new Configuration();
-    hbaseAdmin = new HBaseAdmin(conf);
-    ClusterStatus clusterStatus = hbaseAdmin.getClusterStatus();
-    Collection<ServerName> regionServerNames = clusterStatus.getServers();
-    sortedRS = new ArrayList<ServerName>(regionServerNames);
-    Collections.sort(sortedRS);
-  }
-
-  public void close() throws IOException {
-    hbaseAdmin.close();
-  }
-
-  /**
-   * The table comes in already split into regions specified by splitPoints and with data
-   * already loaded. Pair up adjacent regions and assign them to the same server.
-   * Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
-   * will be on the same server.
-   */
-  public void performAssignment(String tableName) throws IOException,
-    InterruptedException, TableNotFoundException {
-    HTableDescriptor[] desc = hbaseAdmin.listTables(tableName);
-    if (desc == null || desc.length == 0) {
-      throw new TableNotFoundException("Table " + tableName + " not found.");
-    }
-
-    // Sort the region by start key
-    List<HRegionInfo> regions = hbaseAdmin.getTableRegions(tableName.getBytes());
-    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
-    Collections.sort(regions);
-
-    // Pair up two adjacent regions to the same region server. That is,
-    // region server 1 <- regions (unbound:1), (1:3)
-    // region server 2 <- regions (3:5), (5:7)
-    // region server 3 <- regions (7:9), (9:unbound)
-    NavigableMap<HRegionInfo, ServerName> expectedLocs = Maps.newTreeMap();
-    for (int i = 0; i < regions.size(); ++i) {
-      HRegionInfo regionInfo = regions.get(i);
-      int rsIdx = (i / 2) % sortedRS.size();
-      ServerName regionServerName = sortedRS.get(rsIdx);
-      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
-          regionServerName.getServerName().getBytes());
-      expectedLocs.put(regionInfo, regionServerName);
-    }
-
-    // hbaseAdmin.move() is an asynchronous operation. HBase tests use sleep to wait for
-    // the move to complete. It should finish within 10 seconds.
-    int sleepCnt = 0;
-    HTable hbaseTable = new HTable(conf, tableName);
-    try {
-      while(!expectedLocs.equals(hbaseTable.getRegionLocations()) &&
-          sleepCnt < 100) {
-        Thread.sleep(100);
-        ++sleepCnt;
-      }
-      NavigableMap<HRegionInfo, ServerName> actualLocs = hbaseTable.getRegionLocations();
-      Preconditions.checkArgument(expectedLocs.equals(actualLocs));
-
-      // Log the actual region location map
-      for (Map.Entry<HRegionInfo, ServerName> entry: actualLocs.entrySet()) {
-        LOG.info(HBaseScanNode.printKey(entry.getKey().getStartKey()) + " -> " +
-            entry.getValue().getHostAndPort());
-      }
-
-      // Force a major compaction such that the HBase table is backed by deterministic
-      // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
-      // these physical artifacts.
-      LOG.info("Major compacting HBase table: " + tableName);
-      hbaseAdmin.majorCompact(tableName);
-    } finally {
-      IOUtils.closeQuietly(hbaseTable);
-    }
-  }
-}
-
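
The pairing arithmetic in performAssignment() is easy to sanity-check by
hand: with six regions and three region servers, (i / 2) % numServers sends
adjacent regions to the same server. A runnable check (values hypothetical):

    public class PairingDemo {
      public static void main(String[] args) {
        int numServers = 3;
        for (int i = 0; i < 6; ++i) {
          // Same quotient for i and i+1 when i is even, so adjacent pairs
          // land on the same server.
          System.out.println("region " + i + " -> server " + (i / 2) % numServers);
        }
        // regions 0,1 -> server 0; 2,3 -> server 1; 4,5 -> server 2
      }
    }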

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
deleted file mode 100644
index 8c9bff8..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/analysis/ParquetHelper.java
+++ /dev/null
@@ -1,341 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.analysis;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.parquet.hadoop.metadata.ParquetMetadata;
-import org.apache.parquet.hadoop.ParquetFileReader;
-import org.apache.parquet.schema.OriginalType;
-import org.apache.parquet.schema.PrimitiveType;
-
-import org.apache.impala.catalog.ArrayType;
-import org.apache.impala.catalog.MapType;
-import org.apache.impala.catalog.ScalarType;
-import org.apache.impala.catalog.StructField;
-import org.apache.impala.catalog.StructType;
-import org.apache.impala.catalog.Type;
-import org.apache.impala.common.AnalysisException;
-import org.apache.impala.common.FileSystemUtil;
-
-/**
- * Provides extractParquetSchema() to extract a schema
- * from a parquet file.
- *
- * Because Parquet's Java package changed between Parquet 1.5
- * and 1.9, a second copy of this file, with "org.apache.parquet." replaced
- * with "org.apache.org.apache.parquet." is generated by the build system.
- */
-class ParquetHelper {
-  private final static String ERROR_MSG =
-      "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
-
-  /**
-   * Reads the first block from the given HDFS file and returns the Parquet schema.
-   * Throws Analysis exception for any failure, such as failing to read the file
-   * or failing to parse the contents.
-   */
-  private static org.apache.parquet.schema.MessageType loadParquetSchema(Path pathToFile)
-      throws AnalysisException {
-    try {
-      FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
-      if (!fs.isFile(pathToFile)) {
-        throw new AnalysisException("Cannot infer schema, path is not a file: " +
-                                    pathToFile);
-      }
-    } catch (IOException e) {
-      throw new AnalysisException("Failed to connect to filesystem:" + e);
-    } catch (IllegalArgumentException e) {
-      throw new AnalysisException(e.getMessage());
-    }
-    ParquetMetadata readFooter = null;
-    try {
-      readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
-          pathToFile);
-    } catch (FileNotFoundException e) {
-      throw new AnalysisException("File not found: " + e);
-    } catch (IOException e) {
-      throw new AnalysisException("Failed to open file as a parquet file: " + e);
-    } catch (RuntimeException e) {
-      // Parquet throws a generic RuntimeException when reading a non-parquet file
-      if (e.toString().contains("is not a Parquet file")) {
-        throw new AnalysisException("File is not a parquet file: " + pathToFile);
-      }
-      // otherwise, who knows what we caught, throw it back up
-      throw e;
-    }
-    return readFooter.getFileMetaData().getSchema();
-  }
-
-  /**
-   * Converts a "primitive" Parquet type to an Impala type.
-   * A primitive type is a non-nested type with no annotations.
-   */
-  private static Type convertPrimitiveParquetType(org.apache.parquet.schema.Type parquetType)
-      throws AnalysisException {
-    Preconditions.checkState(parquetType.isPrimitive());
-    PrimitiveType prim = parquetType.asPrimitiveType();
-    switch (prim.getPrimitiveTypeName()) {
-      case BINARY: return Type.STRING;
-      case BOOLEAN: return Type.BOOLEAN;
-      case DOUBLE: return Type.DOUBLE;
-      case FIXED_LEN_BYTE_ARRAY:
-        throw new AnalysisException(
-            "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
-                parquetType.getName());
-      case FLOAT: return Type.FLOAT;
-      case INT32: return Type.INT;
-      case INT64: return Type.BIGINT;
-      case INT96: return Type.TIMESTAMP;
-      default:
-        Preconditions.checkState(false, "Unexpected parquet primitive type: " +
-               prim.getPrimitiveTypeName());
-        return null;
-    }
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala map Type. We support both standard
-   * Parquet map representations, as well as legacy. Legacy representations are handled
-   * according to this specification:
-   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
-   *
-   * Standard representation of a map in Parquet:
-   * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
-   *   repeated group key_value {
-   *     required <key-type> key;
-   *     <optional | required> <value-type> value;
-   *   }
-   * }
-   */
-  private static MapType convertMap(org.apache.parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    if (outerGroup.getFieldCount() != 1) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The logical MAP type must have exactly 1 inner field."));
-    }
-
-    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
-    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The logical MAP type must have a repeated inner field."));
-    }
-    if (innerField.isPrimitive()) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The inner field of the logical MAP type must be a group."));
-    }
-
-    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
-    // It does not matter whether innerGroup has an annotation or not (for example it
-    // may be annotated with MAP_KEY_VALUE). We treat the annotated and unannotated
-    // cases the same.
-    if (innerGroup.getFieldCount() != 2) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
-    }
-
-    org.apache.parquet.schema.Type key = innerGroup.getType(0);
-    if (!key.getName().equals("key")) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The name of the first field of the inner field of the logical MAP " +
-          "type must be 'key'"));
-    }
-    if (!key.isPrimitive()) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The key type of the logical MAP type must be primitive."));
-    }
-    org.apache.parquet.schema.Type value = innerGroup.getType(1);
-    if (!value.getName().equals("value")) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "MAP", "The name of the second field of the inner field of the logical MAP " +
-          "type must be 'value'"));
-    }
-
-    return new MapType(convertParquetType(key), convertParquetType(value));
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala struct Type.
-   */
-  private static StructType convertStruct(org.apache.parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    ArrayList<StructField> structFields = new ArrayList<StructField>();
-    for (org.apache.parquet.schema.Type field: outerGroup.getFields()) {
-      StructField f = new StructField(field.getName(), convertParquetType(field));
-      structFields.add(f);
-    }
-    return new StructType(structFields);
-  }
-
-  /**
-   * Converts a Parquet group type to an Impala array Type. We can handle the standard
-   * representation, but also legacy representations for backwards compatibility.
-   * Legacy representations are handled according to this specification:
-   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
-   *
-   * Standard representation of an array in Parquet:
-   * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
-   *   repeated group list {
-   *     <optional | required> <element-type> element;
-   *   }
-   * }
-   */
-  private static ArrayType convertArray(org.apache.parquet.schema.GroupType outerGroup)
-      throws AnalysisException {
-    if (outerGroup.getFieldCount() != 1) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "LIST", "The logical LIST type must have exactly 1 inner field."));
-    }
-
-    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
-    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
-      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
-          "LIST", "The inner field of the logical LIST type must be repeated."));
-    }
-    if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
-      // From the Parquet Spec:
-      // 1. If the repeated field is not a group then its type is the element type.
-      //
-      // If innerField is a group, but originalType is not null, the element type is
-      // based on the logical type.
-      return new ArrayType(convertParquetType(innerField));
-    }
-
-    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
-    if (innerGroup.getFieldCount() != 1) {
-      // From the Parquet Spec:
-      // 2. If the repeated field is a group with multiple fields, then its type is a
-      //    struct.
-      return new ArrayType(convertStruct(innerGroup));
-    }
-
-    return new ArrayType(convertParquetType(innerGroup.getType(0)));
-  }
-
-  /**
-   * Converts a "logical" Parquet type to an Impala column type.
-   * A Parquet type is considered logical when it has an annotation. The annotation is
-   * stored as a "OriginalType". The Parquet documentation refers to these as logical
-   * types, so we use that terminology here.
-   */
-  private static Type convertLogicalParquetType(org.apache.parquet.schema.Type parquetType)
-      throws AnalysisException {
-    OriginalType orig = parquetType.getOriginalType();
-    if (orig == OriginalType.LIST) {
-      return convertArray(parquetType.asGroupType());
-    }
-    if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
-      // MAP_KEY_VALUE annotation should not be used any more. However, according to the
-      // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
-      // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
-      // contained by a MAP-annotated group should be handled as a MAP-annotated group.
-      return convertMap(parquetType.asGroupType());
-    }
-
-    PrimitiveType prim = parquetType.asPrimitiveType();
-    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
-        (orig == OriginalType.UTF8 || orig == OriginalType.ENUM)) {
-      // UTF8 is the type annotation Parquet uses for strings
-      // ENUM is the type annotation Parquet uses to indicate that
-      // the original data type, before conversion to parquet, had been enum.
-      // Applications which do not have enumerated types (e.g. Impala)
-      // should interpret it as a string.
-      // We check to make sure it applies to BINARY to avoid errors if there is a bad
-      // annotation.
-      return Type.STRING;
-    }
-
-    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT32
-        || prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT64) {
-      // Map signed integer types to a supported Impala column type
-      switch (orig) {
-        case INT_8: return Type.TINYINT;
-        case INT_16: return Type.SMALLINT;
-        case INT_32: return Type.INT;
-        case INT_64: return Type.BIGINT;
-      }
-    }
-
-    if (orig == OriginalType.DECIMAL) {
-      return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
-                                           prim.getDecimalMetadata().getScale());
-    }
-
-    throw new AnalysisException(
-        "Unsupported logical parquet type " + orig + " (primitive type is " +
-            prim.getPrimitiveTypeName().name() + ") for field " +
-            parquetType.getName());
-  }
-
-  /**
-   * Converts a Parquet type into an Impala type.
-   */
-  private static Type convertParquetType(org.apache.parquet.schema.Type field)
-      throws AnalysisException {
-    Type type = null;
-    // TODO for 2.3: If a field is not annotated with LIST, it can still sometimes be
-    // interpreted as an array. The following 2 examples should be interpreted as an array
-    // of integers, but this is currently not done.
-    // 1. repeated int int_col;
-    // 2. required group int_arr {
-    //      repeated group list {
-    //        required int element;
-    //      }
-    //    }
-    if (field.getOriginalType() != null) {
-      type = convertLogicalParquetType(field);
-    } else if (field.isPrimitive()) {
-      type = convertPrimitiveParquetType(field);
-    } else {
-      // If field is not primitive, it must be a struct.
-      type = convertStruct(field.asGroupType());
-    }
-    return type;
-  }
-
-  /**
-   * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
-   * This fails with an analysis exception if any errors occur reading the file,
-   * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
-   */
-  static List<ColumnDef> extractParquetSchema(HdfsUri location)
-      throws AnalysisException {
-    org.apache.parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
-    List<org.apache.parquet.schema.Type> fields = parquetSchema.getFields();
-    List<ColumnDef> schema = new ArrayList<ColumnDef>();
-
-    for (org.apache.parquet.schema.Type field: fields) {
-      Type type = convertParquetType(field);
-      Preconditions.checkNotNull(type);
-      String colName = field.getName();
-      Map<ColumnDef.Option, Object> option = Maps.newHashMap();
-      option.put(ColumnDef.Option.COMMENT, "Inferred from Parquet file.");
-      schema.add(new ColumnDef(colName, new TypeDef(type), option));
-    }
-    return schema;
-  }
-}
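
A worked example of the conversion rules implemented above, traced by hand
through convertPrimitiveParquetType(), convertArray(), and convertMap() (the
schema itself is hypothetical):

    Parquet schema                                       Impala type
    --------------------------------------------------   ------------------
    required binary name (UTF8)                          STRING
    optional int32 age (INT_8)                           TINYINT
    optional int96 created                               TIMESTAMP
    optional group tags (LIST) {
      repeated group list { optional binary element (UTF8); }
    }                                                    ARRAY<STRING>
    optional group attrs (MAP) {
      repeated group key_value {
        required binary key (UTF8);
        optional int64 value;
      }
    }                                                    MAP<STRING,BIGINT>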

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
deleted file mode 100644
index c3ef004..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaActionFactory.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-import org.apache.impala.authorization.Privilege.ImpalaAction;
-import org.apache.sentry.core.common.BitFieldAction;
-import org.apache.sentry.core.common.BitFieldActionFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * An implementation of BitFieldActionFactory for Impala.
- */
-public class ImpalaActionFactory extends BitFieldActionFactory {
-  @Override
-  public List<? extends BitFieldAction> getActionsByCode(int actionCode) {
-    Preconditions.checkArgument(
-        actionCode >= 1 && actionCode <= ImpalaAction.ALL.getCode(),
-        String.format("Action code must between 1 and %d.", ImpalaAction.ALL.getCode()));
-
-    List<BitFieldAction> actions = new ArrayList<>();
-    for (ImpalaAction action : ImpalaAction.values()) {
-      if ((action.getCode() & actionCode) == action.getCode()) {
-        actions.add(action.getBitFieldAction());
-      }
-    }
-    return actions;
-  }
-
-  @Override
-  public BitFieldAction getActionByName(String name) {
-    Preconditions.checkNotNull(name);
-
-    for (ImpalaAction action : ImpalaAction.values()) {
-      if (action.getValue().equalsIgnoreCase(name)) {
-        return action.getBitFieldAction();
-      }
-    }
-    return null;
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
deleted file mode 100644
index 43a194e..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.impala.authorization;
-
-import java.util.Map;
-
-import org.apache.sentry.core.common.BitFieldActionFactory;
-import org.apache.sentry.core.common.ImplyMethodType;
-import org.apache.sentry.core.model.db.HivePrivilegeModel;
-import org.apache.sentry.core.common.Model;
-
-/**
- * Delegates to HivePrivilegeModel for getImplyMethodMap(), but
- * uses Impala's BitFieldActionFactory implementation.
- */
-public class ImpalaPrivilegeModel implements Model {
-  public static final ImpalaPrivilegeModel INSTANCE = new ImpalaPrivilegeModel();
-  private final ImpalaActionFactory actionFactory = new ImpalaActionFactory();
-
-  @Override
-  public Map<String, ImplyMethodType> getImplyMethodMap() {
-    return HivePrivilegeModel.getInstance().getImplyMethodMap();
-  }
-
-  @Override
-  public BitFieldActionFactory getBitFieldActionFactory() {
-    return actionFactory;
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
deleted file mode 100644
index a4f0743..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/authorization/SentryAuthProvider.java
+++ /dev/null
@@ -1,80 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.authorization;
-
-import com.google.common.base.Preconditions;
-
-import org.apache.impala.catalog.AuthorizationPolicy;
-
-import org.apache.commons.lang.reflect.ConstructorUtils;
-import org.apache.sentry.core.common.Model;
-import org.apache.sentry.core.model.db.HivePrivilegeModel;
-import org.apache.sentry.policy.common.PolicyEngine;
-import org.apache.sentry.policy.engine.common.CommonPolicyEngine;
-import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackend;
-import org.apache.sentry.provider.common.ProviderBackendContext;
-import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
-import org.apache.sentry.provider.file.SimpleFileProviderBackend;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-class SentryAuthProvider {
-  /*
-   * Creates a new ResourceAuthorizationProvider based on the given configuration.
-   */
-  static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
-      AuthorizationPolicy policy) {
-    try {
-      ProviderBackend providerBe;
-      // Create the appropriate backend provider.
-      if (config.isFileBasedPolicy()) {
-        providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
-            config.getPolicyFile());
-        ProviderBackendContext context = new ProviderBackendContext();
-        providerBe.initialize(context);
-      } else {
-        // Note: The second parameter to the ProviderBackend is a "resourceFile" path
-        // which is not used by Impala. We cannot pass 'null' so instead pass an empty
-        // string.
-        providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
-            "");
-        Preconditions.checkNotNull(policy);
-        ProviderBackendContext context = new ProviderBackendContext();
-        context.setBindingHandle(policy);
-        providerBe.initialize(context);
-      }
-
-      CommonPolicyEngine engine =
-          new CommonPolicyEngine(providerBe);
-
-      // Try to create an instance of the specified policy provider class.
-      // Re-throw any exceptions that are encountered.
-      String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
-
-      return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
-          Class.forName(config.getPolicyProviderClassName()),
-          new Object[] {policyFile, engine, ImpalaPrivilegeModel.INSTANCE});
-    } catch (Exception e) {
-      // Re-throw as unchecked exception.
-      throw new IllegalStateException(
-          "Error creating ResourceAuthorizationProvider: ", e);
-    }
-  }
-}


[3/5] impala git commit: IMPALA-7295: Remove IMPALA_MINICLUSTER_PROFILE=2

Posted by mi...@apache.org.
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
deleted file mode 100644
index 9453f80..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/HdfsShim.java
+++ /dev/null
@@ -1,30 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import org.apache.hadoop.fs.FileStatus;
-
-/**
- * Wrapper classes to abstract away differences between HDFS versions in
- * the MiniCluster profiles.
- */
-public class HdfsShim {
-  public static boolean isErasureCoded(FileStatus fileStatus) {
-    return fileStatus.isErasureCoded();
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
deleted file mode 100644
index 3d69545..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MetastoreShim.java
+++ /dev/null
@@ -1,127 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.common.StatsSetupConst;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.IMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
-import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
-import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
-import org.apache.hive.service.rpc.thrift.TGetTablesReq;
-import org.apache.impala.authorization.User;
-import org.apache.impala.common.ImpalaException;
-import org.apache.impala.common.Pair;
-import org.apache.impala.service.Frontend;
-import org.apache.impala.service.MetadataOp;
-import org.apache.impala.thrift.TMetadataOpRequest;
-import org.apache.impala.thrift.TResultSet;
-import org.apache.thrift.TException;
-
-/**
- * A wrapper around some of Hive's Metastore API's to abstract away differences
- * between major versions of Hive. This implements the shimmed methods for Hive 2.
- */
-public class MetastoreShim {
-  /**
-   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
-   */
-  public static boolean validateName(String name) {
-    return MetaStoreUtils.validateName(name, null);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
-   * arguments.
-   */
-  public static void alterPartition(IMetaStoreClient client, Partition partition)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partition(
-        partition.getDbName(), partition.getTableName(), partition, null);
-  }
-
-  /**
-   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
-   * arguments.
-   */
-  public static void alterPartitions(IMetaStoreClient client, String dbName,
-      String tableName, List<Partition> partitions)
-      throws InvalidOperationException, MetaException, TException {
-    client.alter_partitions(dbName, tableName, partitions, null);
-  }
-
-  /**
-   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
-   * arguments.
-   */
-  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
-      throws MetaException {
-    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
-  }
-
-  /**
-   * Return the maximum number of Metastore objects that should be retrieved in
-   * a batch.
-   */
-  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
-    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
-  }
-
-  /**
-   * Return the key and value that should be set in the partition parameters to
-   * mark that the stats were generated automatically by a stats task.
-   */
-  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
-    return Pair.create(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
-  }
-
-  public static TResultSet execGetFunctions(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetFunctionsReq req = request.getGet_functions_req();
-    return MetadataOp.getFunctions(
-        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
-  }
-
-  public static TResultSet execGetColumns(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetColumnsReq req = request.getGet_columns_req();
-    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getColumnName(), user);
-  }
-
-  public static TResultSet execGetTables(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetTablesReq req = request.getGet_tables_req();
-    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
-        req.getTableName(), req.getTableTypes(), user);
-  }
-
-  public static TResultSet execGetSchemas(
-      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
-    TGetSchemasReq req = request.getGet_schemas_req();
-    return MetadataOp.getSchemas(
-        frontend, req.getCatalogName(), req.getSchemaName(), user);
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
deleted file mode 100644
index 9f9c36c..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/compat/MiniclusterProfile.java
+++ /dev/null
@@ -1,25 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.compat;
-
-/**
- * Constant to tell us what Minicluster Profile we are built against.
- */
-public class MiniclusterProfile {
-  public static final int MINICLUSTER_PROFILE = 3;
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java b/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
deleted file mode 100644
index f85e890..0000000
--- a/fe/src/compat-minicluster-profile-3/java/org/apache/impala/util/SentryUtil.java
+++ /dev/null
@@ -1,54 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.util;
-
-import java.util.Set;
-
-import org.apache.sentry.core.common.exception.SentryAccessDeniedException;
-import org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
-import org.apache.sentry.core.common.exception.SentryGroupNotFoundException;
-import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
-import org.apache.sentry.provider.db.service.thrift.TSentryRole;
-// See IMPALA-5540. Sentry over-shades itself (to avoid leaking Thrift),
-// causing this unusual package name. In the code below, we typically
-// check for either variant when it's available in the classpath.
-import sentry.org.apache.sentry.core.common.exception.SentryUserException;
-
-/**
- * Wrapper to facilitate differences in Sentry APIs across Sentry versions.
- */
-public class SentryUtil {
-  static boolean isSentryAlreadyExists(Exception e) {
-    return e instanceof SentryAlreadyExistsException || e instanceof
-      sentry.org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
-  }
-
-  static boolean isSentryAccessDenied(Exception e) {
-    return e instanceof SentryAccessDeniedException || e instanceof
-      sentry.org.apache.sentry.core.common.exception.SentryAccessDeniedException;
-  }
-
-  public static boolean isSentryGroupNotFound(Exception e) {
-    return e instanceof SentryGroupNotFoundException;
-  }
-
-  static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
-      throws SentryUserException {
-    return client.listAllRoles(username);
-  }
-}

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java b/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
deleted file mode 100644
index bd39839..0000000
--- a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
+++ /dev/null
@@ -1,132 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-package org.apache.impala.authorization;
-
-import com.google.common.collect.Lists;
-import org.apache.impala.authorization.Privilege.ImpalaAction;
-import org.apache.sentry.core.common.BitFieldAction;
-import org.junit.Test;
-
-import java.util.List;
-import java.util.Random;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
-
-public class ImpalaActionFactoryTest {
-  @Test
-  public void testGetActionsByCode() {
-    ImpalaActionFactory factory = new ImpalaActionFactory();
-
-    List<? extends BitFieldAction> actual = factory.getActionsByCode(
-        ImpalaAction.SELECT.getCode() |
-        ImpalaAction.INSERT.getCode() |
-        ImpalaAction.CREATE.getCode());
-    List<ImpalaAction> expected = Lists.newArrayList(
-        ImpalaAction.SELECT,
-        ImpalaAction.INSERT,
-        ImpalaAction.CREATE);
-    assertBitFieldActions(expected, actual);
-
-    actual = factory.getActionsByCode(
-        ImpalaAction.SELECT.getCode() |
-        ImpalaAction.INSERT.getCode() |
-        ImpalaAction.ALTER.getCode() |
-        ImpalaAction.CREATE.getCode() |
-        ImpalaAction.DROP.getCode() |
-        ImpalaAction.REFRESH.getCode());
-    expected = Lists.newArrayList(
-         ImpalaAction.SELECT,
-         ImpalaAction.INSERT,
-         ImpalaAction.ALTER,
-         ImpalaAction.CREATE,
-         ImpalaAction.DROP,
-         ImpalaAction.REFRESH,
-         ImpalaAction.ALL);
-     assertBitFieldActions(expected, actual);
-
-    actual = factory.getActionsByCode(ImpalaAction.ALL.getCode());
-    expected = Lists.newArrayList(
-        ImpalaAction.SELECT,
-        ImpalaAction.INSERT,
-        ImpalaAction.ALTER,
-        ImpalaAction.CREATE,
-        ImpalaAction.DROP,
-        ImpalaAction.REFRESH,
-        ImpalaAction.ALL);
-    assertBitFieldActions(expected, actual);
-
-    try {
-      factory.getActionsByCode(Integer.MAX_VALUE);
-      fail("IllegalArgumentException should be thrown.");
-    } catch (IllegalArgumentException e) {
-      assertEquals(String.format("Action code must between 1 and %d.",
-          ImpalaAction.ALL.getCode()), e.getMessage());
-    }
-
-    try {
-      factory.getActionsByCode(Integer.MIN_VALUE);
-      fail("IllegalArgumentException should be thrown.");
-    } catch (IllegalArgumentException e) {
-      assertEquals(String.format("Action code must between 1 and %d.",
-          ImpalaAction.ALL.getCode()), e.getMessage());
-    }
-  }
-
-  private static void assertBitFieldActions(List<ImpalaAction> expected,
-      List<? extends BitFieldAction> actual) {
-    assertEquals(expected.size(), actual.size());
-    for (int i = 0; i < actual.size(); i++) {
-      assertEquals(expected.get(i).getValue(), actual.get(i).getValue());
-      assertEquals(expected.get(i).getCode(), actual.get(i).getActionCode());
-    }
-  }
-
-  @Test
-  public void testGetActionByName() {
-    ImpalaActionFactory impala = new ImpalaActionFactory();
-
-    for (ImpalaAction action : ImpalaAction.values()) {
-      testGetActionByName(impala, action, action.getValue());
-    }
-    assertNull(impala.getActionByName("foo"));
-  }
-
-  private static void testGetActionByName(ImpalaActionFactory impala,
-      ImpalaAction expected, String name) {
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(name.toUpperCase()));
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(name.toLowerCase()));
-    assertEquals(toBitFieldAction(expected),
-        impala.getActionByName(randomizeCaseSensitivity(name)));
-  }
-
-  private static String randomizeCaseSensitivity(String str) {
-    char[] chars = str.toCharArray();
-    Random random = new Random(System.currentTimeMillis());
-    for (int i = 0; i < chars.length; i++) {
-      chars[i] = (random.nextBoolean()) ? Character.toUpperCase(chars[i]) : chars[i];
-    }
-    return new String(chars);
-  }
-
-  private static BitFieldAction toBitFieldAction(ImpalaAction action) {
-    return new BitFieldAction(action.getValue(), action.getCode());
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
deleted file mode 100644
index 85f8510..0000000
--- a/fe/src/compat-minicluster-profile-3/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
+++ /dev/null
@@ -1,164 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements.  See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership.  The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License.  You may obtain a copy of the License at
-//
-//   http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied.  See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-package org.apache.impala.datagenerator;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ClusterStatus;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.impala.planner.HBaseScanNode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
-
-/**
- * Deterministically assign regions to region servers.
- */
-public class HBaseTestDataRegionAssignment {
-  public class TableNotFoundException extends Exception {
-    public TableNotFoundException(String s) {
-      super(s);
-    }
-  }
-
-  private final static Logger LOG = LoggerFactory.getLogger(
-      HBaseTestDataRegionAssignment.class);
-  private final Configuration conf;
-  private Connection connection = null;
-  private final Admin admin;
-  private final List<ServerName> sortedRS; // sorted list of region server names
-  private final String[] splitPoints = { "1", "3", "5", "7", "9"};
-
-  private final static int REGION_MOVE_TIMEOUT_MILLIS = 60000;
-
-  public HBaseTestDataRegionAssignment() throws IOException {
-    conf = new Configuration();
-    connection = ConnectionFactory.createConnection(conf);
-    admin = connection.getAdmin();
-    ClusterStatus clusterStatus = admin.getClusterStatus();
-    List<ServerName> regionServerNames =
-        new ArrayList<ServerName>(clusterStatus.getServers());
-    ServerName master = clusterStatus.getMaster();
-    regionServerNames.remove(master);
-    sortedRS = new ArrayList<ServerName>(regionServerNames);
-    Collections.sort(sortedRS);
-  }
-
-  public void close() throws IOException {
-    admin.close();
-  }
-
-  /**
-   * The table comes in already split into regions specified by splitPoints and with data
-   * already loaded. Pair up adjacent regions and assign to the same server.
-   * Each region pair in ([unbound:1,1:3], [3:5,5:7], [7:9,9:unbound])
-   * will be on the same server.
-   */
-  public void performAssignment(String tableName) throws IOException,
-    InterruptedException, TableNotFoundException {
-    TableName table = TableName.valueOf(tableName);
-    if (!admin.tableExists(table)) {
-      throw new TableNotFoundException("Table " + tableName + " not found.");
-    }
-
-    // Sort the regions by start key
-    List<RegionInfo> regions = admin.getRegions(table);
-    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
-    Collections.sort(regions, RegionInfo.COMPARATOR);
-    // Pair up two adjacent regions to the same region server. That is,
-    // region server 1 <- regions (unbound:1), (1:3)
-    // region server 2 <- regions (3:5), (5:7)
-    // region server 3 <- regions (7:9), (9:unbound)
-    HashMap<String, ServerName> expectedLocs = Maps.newHashMap();
-    for (int i = 0; i < regions.size(); ++i) {
-      RegionInfo regionInfo = regions.get(i);
-      int rsIdx = (i / 2) % sortedRS.size();
-      ServerName regionServerName = sortedRS.get(rsIdx);
-      LOG.info("Moving " + regionInfo.getRegionNameAsString() +
-               " to " + regionServerName.getAddress());
-      admin.move(regionInfo.getEncodedNameAsBytes(),
-          regionServerName.getServerName().getBytes());
-      expectedLocs.put(regionInfo.getRegionNameAsString(), regionServerName);
-    }
-
-    // admin.move() is an asynchronous operation. Wait for the move to complete.
-    // It should be done in 60 sec.
-    long start = System.currentTimeMillis();
-    long timeout = System.currentTimeMillis() + REGION_MOVE_TIMEOUT_MILLIS;
-    while (true) {
-      int matched = 0;
-      List<Pair<RegionInfo, ServerName>> pairs =
-          MetaTableAccessor.getTableRegionsAndLocations(connection, table);
-      Preconditions.checkState(pairs.size() == regions.size());
-      for (Pair<RegionInfo, ServerName> pair: pairs) {
-        RegionInfo regionInfo = pair.getFirst();
-        String regionName = regionInfo.getRegionNameAsString();
-        ServerName serverName = pair.getSecond();
-        Preconditions.checkNotNull(expectedLocs.get(regionName));
-        LOG.info(regionName + " " + HBaseScanNode.printKey(regionInfo.getStartKey()) +
-            " -> " +  serverName.getAddress().toString() + ", expecting " +
-            expectedLocs.get(regionName));
-        if (expectedLocs.get(regionName).equals(serverName)) {
-           ++matched;
-           continue;
-        }
-      }
-      if (matched == regions.size()) {
-        long elapsed = System.currentTimeMillis() - start;
-        LOG.info("Regions moved after " + elapsed + " millis.");
-        break;
-      }
-      if (System.currentTimeMillis() < timeout) {
-        Thread.sleep(100);
-        continue;
-      }
-      throw new IllegalStateException(
-          String.format("Failed to assign regions to servers after " +
-            REGION_MOVE_TIMEOUT_MILLIS + " millis."));
-    }
-
-    // Force a major compaction such that the HBase table is backed by deterministic
-    // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
-    // these physical artifacts.
-    LOG.info("Major compacting HBase table: " + tableName);
-    admin.majorCompact(table);
-  }
-}
-

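Note on the region-assignment arithmetic above: with the five split points ("1" through "9") the table has six regions, and rsIdx = (i / 2) % sortedRS.size() pins each adjacent pair of regions to one server. A minimal, self-contained sketch of that mapping follows; the class name and server names are illustrative, not part of the commit.

public class RegionPairingSketch {
  public static void main(String[] args) {
    String[] servers = {"rs0", "rs1", "rs2"};  // stands in for sortedRS
    int numRegions = 6;                        // splitPoints.length + 1
    for (int i = 0; i < numRegions; ++i) {
      int rsIdx = (i / 2) % servers.length;    // same formula as performAssignment()
      System.out.println("region " + i + " -> " + servers[rsIdx]);
    }
    // Prints regions 0,1 -> rs0; 2,3 -> rs1; 4,5 -> rs2.
  }
}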
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java b/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
new file mode 100644
index 0000000..8c9bff8
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/analysis/ParquetHelper.java
@@ -0,0 +1,341 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.analysis;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.parquet.hadoop.metadata.ParquetMetadata;
+import org.apache.parquet.hadoop.ParquetFileReader;
+import org.apache.parquet.schema.OriginalType;
+import org.apache.parquet.schema.PrimitiveType;
+
+import org.apache.impala.catalog.ArrayType;
+import org.apache.impala.catalog.MapType;
+import org.apache.impala.catalog.ScalarType;
+import org.apache.impala.catalog.StructField;
+import org.apache.impala.catalog.StructType;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.common.FileSystemUtil;
+
+/**
+ * Provides extractParquetSchema() to extract a schema
+ * from a parquet file.
+ *
+ * Because Parquet's Java package changed between Parquet 1.5
+ * and 1.9, a second copy of this file, with "org.apache.parquet." replaced
+ * with "parquet.", is generated by the build system.
+ */
+class ParquetHelper {
+  private final static String ERROR_MSG =
+      "Failed to convert Parquet type\n%s\nto an Impala %s type:\n%s\n";
+
+  /**
+   * Reads the first block from the given HDFS file and returns the Parquet schema.
+   * Throws Analysis exception for any failure, such as failing to read the file
+   * or failing to parse the contents.
+   */
+  private static org.apache.parquet.schema.MessageType loadParquetSchema(Path pathToFile)
+      throws AnalysisException {
+    try {
+      FileSystem fs = pathToFile.getFileSystem(FileSystemUtil.getConfiguration());
+      if (!fs.isFile(pathToFile)) {
+        throw new AnalysisException("Cannot infer schema, path is not a file: " +
+                                    pathToFile);
+      }
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to connect to filesystem:" + e);
+    } catch (IllegalArgumentException e) {
+      throw new AnalysisException(e.getMessage());
+    }
+    ParquetMetadata readFooter = null;
+    try {
+      readFooter = ParquetFileReader.readFooter(FileSystemUtil.getConfiguration(),
+          pathToFile);
+    } catch (FileNotFoundException e) {
+      throw new AnalysisException("File not found: " + e);
+    } catch (IOException e) {
+      throw new AnalysisException("Failed to open file as a parquet file: " + e);
+    } catch (RuntimeException e) {
+      // Parquet throws a generic RuntimeException when reading a non-parquet file
+      if (e.toString().contains("is not a Parquet file")) {
+        throw new AnalysisException("File is not a parquet file: " + pathToFile);
+      }
+      // otherwise, who knows what we caught, throw it back up
+      throw e;
+    }
+    return readFooter.getFileMetaData().getSchema();
+  }
+
+  /**
+   * Converts a "primitive" Parquet type to an Impala type.
+   * A primitive type is a non-nested type with no annotations.
+   */
+  private static Type convertPrimitiveParquetType(org.apache.parquet.schema.Type parquetType)
+      throws AnalysisException {
+    Preconditions.checkState(parquetType.isPrimitive());
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    switch (prim.getPrimitiveTypeName()) {
+      case BINARY: return Type.STRING;
+      case BOOLEAN: return Type.BOOLEAN;
+      case DOUBLE: return Type.DOUBLE;
+      case FIXED_LEN_BYTE_ARRAY:
+        throw new AnalysisException(
+            "Unsupported parquet type FIXED_LEN_BYTE_ARRAY for field " +
+                parquetType.getName());
+      case FLOAT: return Type.FLOAT;
+      case INT32: return Type.INT;
+      case INT64: return Type.BIGINT;
+      case INT96: return Type.TIMESTAMP;
+      default:
+        Preconditions.checkState(false, "Unexpected parquet primitive type: " +
+               prim.getPrimitiveTypeName());
+        return null;
+    }
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala map Type. We support both standard
+   * Parquet map representations, as well as legacy. Legacy representations are handled
+   * according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
+   *
+   * Standard representation of a map in Parquet:
+   * <optional | required> group <name> (MAP) { <-- outerGroup is pointing at this
+ *   repeated group key_value {
+   *     required <key-type> key;
+   *     <optional | required> <value-type> value;
+   *   }
+   * }
+   */
+  private static MapType convertMap(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have exactly 1 inner field."));
+    }
+
+    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The logical MAP type must have a repeated inner field."));
+    }
+    if (innerField.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must be a group."));
+    }
+
+    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    // It does not matter whether innerGroup has an annotation or not (for example it may
+    // be annotated with MAP_KEY_VALUE). We treat an annotated innerGroup and an
+    // unannotated innerGroup the same.
+    if (innerGroup.getFieldCount() != 2) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The inner field of the logical MAP type must have exactly 2 fields."));
+    }
+
+    org.apache.parquet.schema.Type key = innerGroup.getType(0);
+    if (!key.getName().equals("key")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the first field of the inner field of the logical MAP " +
+          "type must be 'key'"));
+    }
+    if (!key.isPrimitive()) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The key type of the logical MAP type must be primitive."));
+    }
+    org.apache.parquet.schema.Type value = innerGroup.getType(1);
+    if (!value.getName().equals("value")) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "MAP", "The name of the second field of the inner field of the logical MAP " +
+          "type must be 'value'"));
+    }
+
+    return new MapType(convertParquetType(key), convertParquetType(value));
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala struct Type.
+   */
+  private static StructType convertStruct(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    ArrayList<StructField> structFields = new ArrayList<StructField>();
+    for (org.apache.parquet.schema.Type field: outerGroup.getFields()) {
+      StructField f = new StructField(field.getName(), convertParquetType(field));
+      structFields.add(f);
+    }
+    return new StructType(structFields);
+  }
+
+  /**
+   * Converts a Parquet group type to an Impala array Type. We can handle the standard
+   * representation, but also legacy representations for backwards compatibility.
+   * Legacy representations are handled according to this specification:
+   * https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
+   *
+   * Standard representation of an array in Parquet:
+   * <optional | required> group <name> (LIST) { <-- outerGroup is pointing at this
+   *   repeated group list {
+   *     <optional | required> <element-type> element;
+   *   }
+   * }
+   */
+  private static ArrayType convertArray(org.apache.parquet.schema.GroupType outerGroup)
+      throws AnalysisException {
+    if (outerGroup.getFieldCount() != 1) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The logical LIST type must have exactly 1 inner field."));
+    }
+
+    org.apache.parquet.schema.Type innerField = outerGroup.getType(0);
+    if (!innerField.isRepetition(org.apache.parquet.schema.Type.Repetition.REPEATED)) {
+      throw new AnalysisException(String.format(ERROR_MSG, outerGroup.toString(),
+          "LIST", "The inner field of the logical LIST type must be repeated."));
+    }
+    if (innerField.isPrimitive() || innerField.getOriginalType() != null) {
+      // From the Parquet Spec:
+      // 1. If the repeated field is not a group then its type is the element type.
+      //
+      // If innerField is a group, but originalType is not null, the element type is
+      // based on the logical type.
+      return new ArrayType(convertParquetType(innerField));
+    }
+
+    org.apache.parquet.schema.GroupType innerGroup = innerField.asGroupType();
+    if (innerGroup.getFieldCount() != 1) {
+      // From the Parquet Spec:
+      // 2. If the repeated field is a group with multiple fields, then its type is a
+      //    struct.
+      return new ArrayType(convertStruct(innerGroup));
+    }
+
+    return new ArrayType(convertParquetType(innerGroup.getType(0)));
+  }
+
+  /**
+   * Converts a "logical" Parquet type to an Impala column type.
+   * A Parquet type is considered logical when it has an annotation. The annotation is
+   * stored as a "OriginalType". The Parquet documentation refers to these as logical
+   * types, so we use that terminology here.
+   */
+  private static Type convertLogicalParquetType(org.apache.parquet.schema.Type parquetType)
+      throws AnalysisException {
+    OriginalType orig = parquetType.getOriginalType();
+    if (orig == OriginalType.LIST) {
+      return convertArray(parquetType.asGroupType());
+    }
+    if (orig == OriginalType.MAP || orig == OriginalType.MAP_KEY_VALUE) {
+      // MAP_KEY_VALUE annotation should not be used any more. However, according to the
+      // Parquet spec, some existing data incorrectly uses MAP_KEY_VALUE in place of MAP.
+      // For backward-compatibility, a group annotated with MAP_KEY_VALUE that is not
+      // contained by a MAP-annotated group should be handled as a MAP-annotated group.
+      return convertMap(parquetType.asGroupType());
+    }
+
+    PrimitiveType prim = parquetType.asPrimitiveType();
+    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.BINARY &&
+        (orig == OriginalType.UTF8 || orig == OriginalType.ENUM)) {
+      // UTF8 is the type annotation Parquet uses for strings
+      // ENUM is the type annotation Parquet uses to indicate that
+      // the original data type, before conversion to parquet, had been enum.
+      // Applications which do not have enumerated types (e.g. Impala)
+      // should interpret it as a string.
+      // We check to make sure it applies to BINARY to avoid errors if there is a bad
+      // annotation.
+      return Type.STRING;
+    }
+
+    if (prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT32
+        || prim.getPrimitiveTypeName() == PrimitiveType.PrimitiveTypeName.INT64) {
+      // Map signed integer types to a supported Impala column type
+      switch (orig) {
+        case INT_8: return Type.TINYINT;
+        case INT_16: return Type.SMALLINT;
+        case INT_32: return Type.INT;
+        case INT_64: return Type.BIGINT;
+      }
+    }
+
+    if (orig == OriginalType.DECIMAL) {
+      return ScalarType.createDecimalType(prim.getDecimalMetadata().getPrecision(),
+                                           prim.getDecimalMetadata().getScale());
+    }
+
+    throw new AnalysisException(
+        "Unsupported logical parquet type " + orig + " (primitive type is " +
+            prim.getPrimitiveTypeName().name() + ") for field " +
+            parquetType.getName());
+  }
+
+  /**
+   * Converts a Parquet type into an Impala type.
+   */
+  private static Type convertParquetType(org.apache.parquet.schema.Type field)
+      throws AnalysisException {
+    Type type = null;
+    // TODO for 2.3: If a field is not annotated with LIST, it can sometimes still be
+    // interpreted as an array. The following 2 examples should be interpreted as an array
+    // of integers, but this is currently not done.
+    // 1. repeated int int_col;
+    // 2. required group int_arr {
+    //      repeated group list {
+    //        required int element;
+    //      }
+    //    }
+    if (field.getOriginalType() != null) {
+      type = convertLogicalParquetType(field);
+    } else if (field.isPrimitive()) {
+      type = convertPrimitiveParquetType(field);
+    } else {
+      // If field is not primitive, it must be a struct.
+      type = convertStruct(field.asGroupType());
+    }
+    return type;
+  }
+
+  /**
+   * Parses a Parquet file stored in HDFS and returns the corresponding Impala schema.
+   * This fails with an analysis exception if any errors occur reading the file,
+   * parsing the Parquet schema, or if the Parquet types cannot be represented in Impala.
+   */
+  static List<ColumnDef> extractParquetSchema(HdfsUri location)
+      throws AnalysisException {
+    org.apache.parquet.schema.MessageType parquetSchema = loadParquetSchema(location.getPath());
+    List<org.apache.parquet.schema.Type> fields = parquetSchema.getFields();
+    List<ColumnDef> schema = new ArrayList<ColumnDef>();
+
+    for (org.apache.parquet.schema.Type field: fields) {
+      Type type = convertParquetType(field);
+      Preconditions.checkNotNull(type);
+      String colName = field.getName();
+      Map<ColumnDef.Option, Object> option = Maps.newHashMap();
+      option.put(ColumnDef.Option.COMMENT, "Inferred from Parquet file.");
+      schema.add(new ColumnDef(colName, new TypeDef(type), option));
+    }
+    return schema;
+  }
+}

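To see the conversion rules in the new ParquetHelper end to end, the sketch below parses a small Parquet schema with parquet-mr's MessageTypeParser; the Impala types the helper would produce for each field are noted in comments. The driver class itself is hypothetical and not part of the commit.

import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.MessageTypeParser;

public class SchemaConversionSketch {
  public static void main(String[] args) {
    MessageType schema = MessageTypeParser.parseMessageType(
        "message example {\n" +
        "  required int32 id (INT_16);\n" +    // INT32 + INT_16 -> SMALLINT
        "  optional binary name (UTF8);\n" +   // BINARY + UTF8  -> STRING
        "  optional group tags (LIST) {\n" +   // LIST group     -> ARRAY<STRING>
        "    repeated group list {\n" +
        "      optional binary element (UTF8);\n" +
        "    }\n" +
        "  }\n" +
        "}");
    System.out.println(schema);
  }
}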
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
new file mode 100644
index 0000000..c3ef004
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaActionFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.authorization;
+
+import com.google.common.base.Preconditions;
+import org.apache.impala.authorization.Privilege.ImpalaAction;
+import org.apache.sentry.core.common.BitFieldAction;
+import org.apache.sentry.core.common.BitFieldActionFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * An implementation of BitFieldActionFactory for Impala.
+ */
+public class ImpalaActionFactory extends BitFieldActionFactory {
+  @Override
+  public List<? extends BitFieldAction> getActionsByCode(int actionCode) {
+    Preconditions.checkArgument(
+        actionCode >= 1 && actionCode <= ImpalaAction.ALL.getCode(),
+        String.format("Action code must between 1 and %d.", ImpalaAction.ALL.getCode()));
+
+    List<BitFieldAction> actions = new ArrayList<>();
+    for (ImpalaAction action : ImpalaAction.values()) {
+      if ((action.getCode() & actionCode) == action.getCode()) {
+        actions.add(action.getBitFieldAction());
+      }
+    }
+    return actions;
+  }
+
+  @Override
+  public BitFieldAction getActionByName(String name) {
+    Preconditions.checkNotNull(name);
+
+    for (ImpalaAction action : ImpalaAction.values()) {
+      if (action.getValue().equalsIgnoreCase(name)) {
+        return action.getBitFieldAction();
+      }
+    }
+    return null;
+  }
+}

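A short usage sketch for the factory above, decoding a combined action code back into its component actions. The driver class is hypothetical; it assumes the ImpalaAction codes are distinct bits, which is what the bit-containment test in getActionsByCode() relies on.

import org.apache.impala.authorization.ImpalaActionFactory;
import org.apache.impala.authorization.Privilege.ImpalaAction;
import org.apache.sentry.core.common.BitFieldAction;

public class ActionDecodeSketch {
  public static void main(String[] args) {
    ImpalaActionFactory factory = new ImpalaActionFactory();
    int code = ImpalaAction.SELECT.getCode() | ImpalaAction.INSERT.getCode();
    // Every action whose code is fully contained in 'code' is returned.
    for (BitFieldAction action : factory.getActionsByCode(code)) {
      System.out.println(action.getValue());  // the matched action names
    }
  }
}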
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java b/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
new file mode 100644
index 0000000..43a194e
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ImpalaPrivilegeModel.java
@@ -0,0 +1,43 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.authorization;
+
+import java.util.Map;
+
+import org.apache.sentry.core.common.BitFieldActionFactory;
+import org.apache.sentry.core.common.ImplyMethodType;
+import org.apache.sentry.core.model.db.HivePrivilegeModel;
+import org.apache.sentry.core.common.Model;
+
+/**
+ * Delegates to HivePrivilegeModel for getImplyMethodMap(), but
+ * uses Impala's BitFieldActionFactory implementation.
+ */
+public class ImpalaPrivilegeModel implements Model {
+  public static final ImpalaPrivilegeModel INSTANCE = new ImpalaPrivilegeModel();
+  private final ImpalaActionFactory actionFactory = new ImpalaActionFactory();
+
+  @Override
+  public Map<String, ImplyMethodType> getImplyMethodMap() {
+    return HivePrivilegeModel.getInstance().getImplyMethodMap();
+  }
+
+  @Override
+  public BitFieldActionFactory getBitFieldActionFactory() {
+    return actionFactory;
+  }
+}

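For context, Sentry resolves action names through the model's factory, so a lookup routed via the singleton above lands in ImpalaActionFactory.getActionByName(). A hedged sketch; the driver class is hypothetical.

import org.apache.impala.authorization.ImpalaPrivilegeModel;
import org.apache.sentry.core.common.BitFieldAction;
import org.apache.sentry.core.common.BitFieldActionFactory;

public class ModelLookupSketch {
  public static void main(String[] args) {
    BitFieldActionFactory factory =
        ImpalaPrivilegeModel.INSTANCE.getBitFieldActionFactory();
    BitFieldAction select = factory.getActionByName("SELECT");  // case-insensitive
    System.out.println(select == null ? "unknown" : select.getValue());
  }
}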
http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java b/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
new file mode 100644
index 0000000..a4f0743
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/SentryAuthProvider.java
@@ -0,0 +1,80 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization;
+
+import com.google.common.base.Preconditions;
+
+import org.apache.impala.catalog.AuthorizationPolicy;
+
+import org.apache.commons.lang.reflect.ConstructorUtils;
+import org.apache.sentry.core.common.Model;
+import org.apache.sentry.core.model.db.HivePrivilegeModel;
+import org.apache.sentry.policy.common.PolicyEngine;
+import org.apache.sentry.policy.engine.common.CommonPolicyEngine;
+import org.apache.sentry.provider.cache.SimpleCacheProviderBackend;
+import org.apache.sentry.provider.common.ProviderBackend;
+import org.apache.sentry.provider.common.ProviderBackendContext;
+import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
+import org.apache.sentry.provider.file.SimpleFileProviderBackend;
+
+/**
+ * Wrapper that abstracts away differences in Sentry APIs across Sentry versions.
+ */
+class SentryAuthProvider {
+  /**
+   * Creates a new ResourceAuthorizationProvider based on the given configuration.
+   */
+  static ResourceAuthorizationProvider createProvider(AuthorizationConfig config,
+      AuthorizationPolicy policy) {
+    try {
+      ProviderBackend providerBe;
+      // Create the appropriate backend provider.
+      if (config.isFileBasedPolicy()) {
+        providerBe = new SimpleFileProviderBackend(config.getSentryConfig().getConfig(),
+            config.getPolicyFile());
+        ProviderBackendContext context = new ProviderBackendContext();
+        providerBe.initialize(context);
+      } else {
+        // Note: The second parameter to the ProviderBackend is a "resourceFile" path
+        // which is not used by Impala. We cannot pass 'null' so instead pass an empty
+        // string.
+        providerBe = new SimpleCacheProviderBackend(config.getSentryConfig().getConfig(),
+            "");
+        Preconditions.checkNotNull(policy);
+        ProviderBackendContext context = new ProviderBackendContext();
+        context.setBindingHandle(policy);
+        providerBe.initialize(context);
+      }
+
+      CommonPolicyEngine engine =
+          new CommonPolicyEngine(providerBe);
+
+      // Try to create an instance of the specified policy provider class.
+      // Re-throw any exceptions that are encountered.
+      String policyFile = config.getPolicyFile() == null ? "" : config.getPolicyFile();
+
+      return (ResourceAuthorizationProvider) ConstructorUtils.invokeConstructor(
+          Class.forName(config.getPolicyProviderClassName()),
+          new Object[] {policyFile, engine, ImpalaPrivilegeModel.INSTANCE});
+    } catch (Exception e) {
+      // Re-throw as unchecked exception.
+      throw new IllegalStateException(
+          "Error creating ResourceAuthorizationProvider: ", e);
+    }
+  }
+}
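
A hypothetical call site for the factory method above; the 'config' and 'policy' objects are assumed to be built elsewhere in the frontend at startup:

    // Throws IllegalStateException if the provider cannot be constructed.
    ResourceAuthorizationProvider provider =
        SentryAuthProvider.createProvider(config, policy);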

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/compat/HdfsShim.java b/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
new file mode 100644
index 0000000..9453f80
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/compat/HdfsShim.java
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * Wrapper class to abstract away differences between HDFS versions in
+ * the MiniCluster profiles.
+ */
+public class HdfsShim {
+  public static boolean isErasureCoded(FileStatus fileStatus) {
+    return fileStatus.isErasureCoded();
+  }
+}
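
A call-site sketch, assuming 'fs' and 'path' come from the usual Hadoop FileSystem APIs (both are placeholders here):

    FileStatus status = fs.getFileStatus(path);
    boolean erasureCoded = HdfsShim.isErasureCoded(status);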

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
new file mode 100644
index 0000000..3d69545
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/compat/MetastoreShim.java
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.service.MetadataOp;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.thrift.TException;
+
+/**
+ * A wrapper around some of Hive's Metastore APIs to abstract away differences
+ * between major versions of Hive. This implements the shimmed methods for Hive 2.
+ */
+public class MetastoreShim {
+  /**
+   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
+   */
+  public static boolean validateName(String name) {
+    return MetaStoreUtils.validateName(name, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
+   * arguments.
+   */
+  public static void alterPartition(IMetaStoreClient client, Partition partition)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(
+        partition.getDbName(), partition.getTableName(), partition, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
+   * arguments.
+   */
+  public static void alterPartitions(IMetaStoreClient client, String dbName,
+      String tableName, List<Partition> partitions)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions(dbName, tableName, partitions, null);
+  }
+
+  /**
+   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
+   * arguments.
+   */
+  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
+      throws MetaException {
+    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
+  }
+
+  /**
+   * Return the config key that controls the maximum number of Metastore objects
+   * retrieved in a batch.
+   */
+  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
+    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
+  }
+
+  /**
+   * Return the key and value that should be set in the partition parameters to
+   * mark that the stats were generated automatically by a stats task.
+   */
+  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
+    return Pair.create(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+  }
+
+  public static TResultSet execGetFunctions(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetFunctionsReq req = request.getGet_functions_req();
+    return MetadataOp.getFunctions(
+        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
+  }
+
+  public static TResultSet execGetColumns(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetColumnsReq req = request.getGet_columns_req();
+    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getColumnName(), user);
+  }
+
+  public static TResultSet execGetTables(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetTablesReq req = request.getGet_tables_req();
+    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getTableTypes(), user);
+  }
+
+  public static TResultSet execGetSchemas(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetSchemasReq req = request.getGet_schemas_req();
+    return MetadataOp.getSchemas(
+        frontend, req.getCatalogName(), req.getSchemaName(), user);
+  }
+}
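
The intent is that callers go through the shim rather than the version-specific Metastore API directly. A hedged sketch of the call pattern ('tableName' is a placeholder):

    // MetaStoreUtils.validateName() gained an extra argument in Hive 2;
    // the shim hides that difference from callers.
    if (!MetastoreShim.validateName(tableName)) {
      throw new IllegalArgumentException("Invalid name: " + tableName);
    }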

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/main/java/org/apache/impala/util/SentryUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/SentryUtil.java b/fe/src/main/java/org/apache/impala/util/SentryUtil.java
new file mode 100644
index 0000000..f85e890
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/util/SentryUtil.java
@@ -0,0 +1,54 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.util;
+
+import java.util.Set;
+
+import org.apache.sentry.core.common.exception.SentryAccessDeniedException;
+import org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
+import org.apache.sentry.core.common.exception.SentryGroupNotFoundException;
+import org.apache.sentry.provider.db.service.thrift.SentryPolicyServiceClient;
+import org.apache.sentry.provider.db.service.thrift.TSentryRole;
+// See IMPALA-5540. Sentry over-shades itself (to avoid leaking Thrift),
+// which results in this unusual package name. The code below typically checks
+// for both the shaded and unshaded variant of an exception, since either may
+// be present on the classpath.
+import sentry.org.apache.sentry.core.common.exception.SentryUserException;
+
+/**
+ * Wrapper that abstracts away differences in Sentry APIs across Sentry versions.
+ */
+public class SentryUtil {
+  static boolean isSentryAlreadyExists(Exception e) {
+    return e instanceof SentryAlreadyExistsException || e instanceof
+      sentry.org.apache.sentry.core.common.exception.SentryAlreadyExistsException;
+  }
+
+  static boolean isSentryAccessDenied(Exception e) {
+    return e instanceof SentryAccessDeniedException || e instanceof
+      sentry.org.apache.sentry.core.common.exception.SentryAccessDeniedException;
+  }
+
+  public static boolean isSentryGroupNotFound(Exception e) {
+    return e instanceof SentryGroupNotFoundException;
+  }
+
+  static Set<TSentryRole> listRoles(SentryPolicyServiceClient client, String username)
+      throws SentryUserException {
+    return client.listAllRoles(username);
+  }
+}
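
Illustratively, call sites can then treat the shaded and unshaded exception variants uniformly (sketch only; the Sentry client call mirrors listRoles() above, and the catch body is a placeholder):

    try {
      client.listAllRoles(username);
    } catch (Exception e) {
      if (SentryUtil.isSentryAccessDenied(e)) {
        // Handle access denial regardless of which shaded variant was thrown.
      }
    }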

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index 2a45c3b..e0bd062 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -43,7 +43,6 @@ import org.apache.impala.common.FrontendTestBase;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.RuntimeEnv;
-import org.apache.impala.compat.MiniclusterProfile;
 import org.apache.impala.service.Frontend;
 import org.apache.impala.testutil.ImpaladTestCatalog;
 import org.apache.impala.thrift.TMetadataOpRequest;
@@ -824,13 +823,11 @@ public class AuthorizationTest extends FrontendTestBase {
   public void TestShortUsernameUsed() throws Exception {
     // Different long variations of the same username.
     List<User> users = Lists.newArrayList(
-        // Hadoop 2 accepts kerberos names missing a realm, but insists
-        // on having a terminating '@' even when the default realm
-        // is intended.  Hadoop 3 now has more normal name convetions,
-        // where to specify the default realm, everything after and
-        // including the '@' character is omitted.
-        new User(USER.getName() + "/abc.host.com" +
-          (MiniclusterProfile.MINICLUSTER_PROFILE == 3 ? "" : "@")),
+        // Historical note: Hadoop 2 accepted kerberos names missing a realm, but
+        // insisted on a terminating '@' even when the default realm was intended.
+        // Hadoop 3 uses more conventional naming: to get the default realm,
+        // everything after and including the '@' character is simply omitted.
+        new User(USER.getName() + "/abc.host.com"),
         new User(USER.getName() + "/abc.host.com@REAL.COM"),
         new User(USER.getName() + "@REAL.COM"));
     for (User user: users) {

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java b/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
new file mode 100644
index 0000000..bd39839
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/authorization/ImpalaActionFactoryTest.java
@@ -0,0 +1,132 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package org.apache.impala.authorization;
+
+import com.google.common.collect.Lists;
+import org.apache.impala.authorization.Privilege.ImpalaAction;
+import org.apache.sentry.core.common.BitFieldAction;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+public class ImpalaActionFactoryTest {
+  @Test
+  public void testGetActionsByCode() {
+    ImpalaActionFactory factory = new ImpalaActionFactory();
+
+    List<? extends BitFieldAction> actual = factory.getActionsByCode(
+        ImpalaAction.SELECT.getCode() |
+        ImpalaAction.INSERT.getCode() |
+        ImpalaAction.CREATE.getCode());
+    List<ImpalaAction> expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.CREATE);
+    assertBitFieldActions(expected, actual);
+
+    actual = factory.getActionsByCode(
+        ImpalaAction.SELECT.getCode() |
+        ImpalaAction.INSERT.getCode() |
+        ImpalaAction.ALTER.getCode() |
+        ImpalaAction.CREATE.getCode() |
+        ImpalaAction.DROP.getCode() |
+        ImpalaAction.REFRESH.getCode());
+    expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.ALTER,
+        ImpalaAction.CREATE,
+        ImpalaAction.DROP,
+        ImpalaAction.REFRESH,
+        ImpalaAction.ALL);
+    assertBitFieldActions(expected, actual);
+
+    actual = factory.getActionsByCode(ImpalaAction.ALL.getCode());
+    expected = Lists.newArrayList(
+        ImpalaAction.SELECT,
+        ImpalaAction.INSERT,
+        ImpalaAction.ALTER,
+        ImpalaAction.CREATE,
+        ImpalaAction.DROP,
+        ImpalaAction.REFRESH,
+        ImpalaAction.ALL);
+    assertBitFieldActions(expected, actual);
+
+    try {
+      factory.getActionsByCode(Integer.MAX_VALUE);
+      fail("IllegalArgumentException should be thrown.");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format("Action code must between 1 and %d.",
+          ImpalaAction.ALL.getCode()), e.getMessage());
+    }
+
+    try {
+      factory.getActionsByCode(Integer.MIN_VALUE);
+      fail("IllegalArgumentException should be thrown.");
+    } catch (IllegalArgumentException e) {
+      assertEquals(String.format("Action code must between 1 and %d.",
+          ImpalaAction.ALL.getCode()), e.getMessage());
+    }
+  }
+
+  private static void assertBitFieldActions(List<ImpalaAction> expected,
+      List<? extends BitFieldAction> actual) {
+    assertEquals(expected.size(), actual.size());
+    for (int i = 0; i < actual.size(); i++) {
+      assertEquals(expected.get(i).getValue(), actual.get(i).getValue());
+      assertEquals(expected.get(i).getCode(), actual.get(i).getActionCode());
+    }
+  }
+
+  @Test
+  public void testGetActionByName() {
+    ImpalaActionFactory impala = new ImpalaActionFactory();
+
+    for (ImpalaAction action : ImpalaAction.values()) {
+      testGetActionByName(impala, action, action.getValue());
+    }
+    assertNull(impala.getActionByName("foo"));
+  }
+
+  private static void testGetActionByName(ImpalaActionFactory impala,
+      ImpalaAction expected, String name) {
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(name.toUpperCase()));
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(name.toLowerCase()));
+    assertEquals(toBitFieldAction(expected),
+        impala.getActionByName(randomizeCaseSensitivity(name)));
+  }
+
+  private static String randomizeCaseSensitivity(String str) {
+    char[] chars = str.toCharArray();
+    Random random = new Random(System.currentTimeMillis());
+    for (int i = 0; i < chars.length; i++) {
+      chars[i] = (random.nextBoolean()) ? Character.toUpperCase(chars[i]) : chars[i];
+    }
+    return new String(chars);
+  }
+
+  private static BitFieldAction toBitFieldAction(ImpalaAction action) {
+    return new BitFieldAction(action.getValue(), action.getCode());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
index 64d743c..7b22551 100644
--- a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
+++ b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
@@ -54,7 +54,6 @@ import org.apache.impala.catalog.ScalarType;
 import org.apache.impala.catalog.Table;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.catalog.View;
-import org.apache.impala.compat.MiniclusterProfile;
 import org.apache.impala.service.CatalogOpExecutor;
 import org.apache.impala.service.Frontend;
 import org.apache.impala.testutil.ImpaladTestCatalog;
@@ -409,15 +408,14 @@ public class FrontendTestBase {
       String errorString = e.getMessage();
       Preconditions.checkNotNull(errorString, "Stack trace lost during exception.");
       String msg = "got error:\n" + errorString + "\nexpected:\n" + expectedErrorString;
-      if (MiniclusterProfile.MINICLUSTER_PROFILE == 3) {
-        // Different versions of Hive have slightly different error messages;
-        // we normalize here as follows:
-        // 'No FileSystem for Scheme "x"' -> 'No FileSystem for scheme: x'
-        if (errorString.contains("No FileSystem for scheme ")) {
-          errorString = errorString.replace("\"", "");
-          errorString = errorString.replace("No FileSystem for scheme ",
-              "No FileSystem for scheme: ");
-        }
+      // TODO: This logic can be removed.
+      // Different versions of Hive have slightly different error messages;
+      // we normalize here as follows:
+      // 'No FileSystem for Scheme "x"' -> 'No FileSystem for scheme: x'
+      if (errorString.contains("No FileSystem for scheme ")) {
+        errorString = errorString.replace("\"", "");
+        errorString = errorString.replace("No FileSystem for scheme ",
+            "No FileSystem for scheme: ");
       }
       Assert.assertTrue(msg, errorString.startsWith(expectedErrorString));
       return;

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java b/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
new file mode 100644
index 0000000..85f8510
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/datagenerator/HBaseTestDataRegionAssignment.java
@@ -0,0 +1,164 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.datagenerator;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.impala.planner.HBaseScanNode;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Maps;
+
+/**
+ * Deterministically assign regions to region servers.
+ */
+public class HBaseTestDataRegionAssignment {
+  public class TableNotFoundException extends Exception {
+    public TableNotFoundException(String s) {
+      super(s);
+    }
+  }
+
+  private final static Logger LOG = LoggerFactory.getLogger(
+      HBaseTestDataRegionAssignment.class);
+  private final Configuration conf;
+  private Connection connection = null;
+  private final Admin admin;
+  private final List<ServerName> sortedRS; // sorted list of region server names
+  private final String[] splitPoints = { "1", "3", "5", "7", "9"};
+
+  private final static int REGION_MOVE_TIMEOUT_MILLIS = 60000;
+
+  public HBaseTestDataRegionAssignment() throws IOException {
+    conf = new Configuration();
+    connection = ConnectionFactory.createConnection(conf);
+    admin = connection.getAdmin();
+    ClusterStatus clusterStatus = admin.getClusterStatus();
+    List<ServerName> regionServerNames =
+        new ArrayList<ServerName>(clusterStatus.getServers());
+    ServerName master = clusterStatus.getMaster();
+    regionServerNames.remove(master);
+    sortedRS = new ArrayList<ServerName>(regionServerNames);
+    Collections.sort(sortedRS);
+  }
+
+  public void close() throws IOException {
+    admin.close();
+  }
+
+  /**
+   * The table comes in already split into the regions specified by splitPoints,
+   * with data already loaded. Pairs up adjacent regions and assigns each pair to
+   * the same region server, so each pair in ([unbound:1,1:3], [3:5,5:7],
+   * [7:9,9:unbound]) ends up on one server.
+   */
+  public void performAssignment(String tableName) throws IOException,
+    InterruptedException, TableNotFoundException {
+    TableName table = TableName.valueOf(tableName);
+    if (!admin.tableExists(table)) {
+      throw new TableNotFoundException("Table " + tableName + " not found.");
+    }
+
+    // Sort the regions by start key.
+    List<RegionInfo> regions = admin.getRegions(table);
+    Preconditions.checkArgument(regions.size() == splitPoints.length + 1);
+    Collections.sort(regions, RegionInfo.COMPARATOR);
+    // Pair up two adjacent regions to the same region server. That is,
+    // region server 1 <- regions (unbound:1), (1:3)
+    // region server 2 <- regions (3:5), (5:7)
+    // region server 3 <- regions (7:9), (9:unbound)
+    HashMap<String, ServerName> expectedLocs = Maps.newHashMap();
+    for (int i = 0; i < regions.size(); ++i) {
+      RegionInfo regionInfo = regions.get(i);
+      int rsIdx = (i / 2) % sortedRS.size();
+      ServerName regionServerName = sortedRS.get(rsIdx);
+      LOG.info("Moving " + regionInfo.getRegionNameAsString() +
+               " to " + regionServerName.getAddress());
+      admin.move(regionInfo.getEncodedNameAsBytes(),
+          regionServerName.getServerName().getBytes());
+      expectedLocs.put(regionInfo.getRegionNameAsString(), regionServerName);
+    }
+
+    // admin.move() is an asynchronous operation. Wait for the moves to complete;
+    // they should finish within REGION_MOVE_TIMEOUT_MILLIS (60 seconds).
+    long start = System.currentTimeMillis();
+    long timeout = System.currentTimeMillis() + REGION_MOVE_TIMEOUT_MILLIS;
+    while (true) {
+      int matched = 0;
+      List<Pair<RegionInfo, ServerName>> pairs =
+          MetaTableAccessor.getTableRegionsAndLocations(connection, table);
+      Preconditions.checkState(pairs.size() == regions.size());
+      for (Pair<RegionInfo, ServerName> pair: pairs) {
+        RegionInfo regionInfo = pair.getFirst();
+        String regionName = regionInfo.getRegionNameAsString();
+        ServerName serverName = pair.getSecond();
+        Preconditions.checkNotNull(expectedLocs.get(regionName));
+        LOG.info(regionName + " " + HBaseScanNode.printKey(regionInfo.getStartKey()) +
+            " -> " +  serverName.getAddress().toString() + ", expecting " +
+            expectedLocs.get(regionName));
+        if (expectedLocs.get(regionName).equals(serverName)) {
+          ++matched;
+          continue;
+        }
+      }
+      if (matched == regions.size()) {
+        long elapsed = System.currentTimeMillis() - start;
+        LOG.info("Regions moved after " + elapsed + " millis.");
+        break;
+      }
+      if (System.currentTimeMillis() < timeout) {
+        Thread.sleep(100);
+        continue;
+      }
+      throw new IllegalStateException(
+          String.format("Failed to assign regions to servers after %d millis.",
+              REGION_MOVE_TIMEOUT_MILLIS));
+    }
+
+    // Force a major compaction such that the HBase table is backed by deterministic
+    // physical artifacts (files, WAL, etc.). Our #rows estimate relies on the sizes of
+    // these physical artifacts.
+    LOG.info("Major compacting HBase table: " + tableName);
+    admin.majorCompact(table);
+  }
+}
+
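
To make the pairing arithmetic in performAssignment() concrete: with six regions and three region servers, rsIdx = (i / 2) % sortedRS.size() distributes the regions as follows (standalone sketch, not part of the patch):

    for (int i = 0; i < 6; ++i) {
      // Prints: 0 -> 0, 1 -> 0, 2 -> 1, 3 -> 1, 4 -> 2, 5 -> 2
      System.out.println(i + " -> " + ((i / 2) % 3));
    }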

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/impala-parent/pom.xml
----------------------------------------------------------------------
diff --git a/impala-parent/pom.xml b/impala-parent/pom.xml
index d962b43..bd091ea 100644
--- a/impala-parent/pom.xml
+++ b/impala-parent/pom.xml
@@ -87,6 +87,14 @@ under the License.
       </snapshots>
     </repository>
     <repository>
+      <id>impala.cdh.repo</id>
+      <url>https://${env.CDH_DOWNLOAD_HOST}/build/cdh_components/${env.CDH_BUILD_NUMBER}/maven</url>
+      <name>Impala CDH Repository</name>
+      <snapshots>
+        <enabled>true</enabled>
+      </snapshots>
+    </repository>
+    <repository>
       <id>cloudera.thirdparty.repo</id>
       <url>https://repository.cloudera.com/content/repositories/third-party</url>
       <name>Cloudera Third Party Repository</name>
@@ -115,45 +123,4 @@ under the License.
     </pluginRepository>
   </pluginRepositories>
 
-  <profiles>
-    <profile>
-      <id>impala-mini-cluster-profile-2</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>2</value>
-        </property>
-      </activation>
-      <repositories>
-        <repository>
-          <id>cdh.snapshots.repo</id>
-          <url>https://repository.cloudera.com/content/repositories/snapshots</url>
-          <name>CDH Snapshots Repository</name>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-    </profile>
-    <profile>
-      <id>impala-mini-cluster-profile-3</id>
-      <activation>
-        <property>
-          <name>env.IMPALA_MINICLUSTER_PROFILE</name>
-          <value>3</value>
-        </property>
-      </activation>
-      <repositories>
-        <repository>
-          <id>impala.cdh.repo</id>
-          <url>https://${env.CDH_DOWNLOAD_HOST}/build/cdh_components/${env.CDH_BUILD_NUMBER}/maven</url>
-          <name>Impala CDH Repository</name>
-          <snapshots>
-            <enabled>true</enabled>
-          </snapshots>
-        </repository>
-      </repositories>
-    </profile>
-  </profiles>
-
 </project>

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-hbase.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-hbase.sh b/testdata/bin/run-hbase.sh
index 264951a..1433073 100755
--- a/testdata/bin/run-hbase.sh
+++ b/testdata/bin/run-hbase.sh
@@ -36,9 +36,7 @@ cat > ${HBASE_CONF_DIR}/hbase-env.sh <<EOF
 export JAVA_HOME=${JAVA_HOME}
 export HBASE_LOG_DIR=${HBASE_LOGDIR}
 export HBASE_PID_DIR=${HBASE_LOGDIR}
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HBASE_CLASSPATH=${HADOOP_CLASSPATH}
-fi
+export HBASE_CLASSPATH=${HADOOP_CLASSPATH}
 export HBASE_HEAPSIZE=1g
 EOF
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-hive-server.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-hive-server.sh b/testdata/bin/run-hive-server.sh
index 3b2c83d..2b5a486 100755
--- a/testdata/bin/run-hive-server.sh
+++ b/testdata/bin/run-hive-server.sh
@@ -75,12 +75,8 @@ if [ ${ONLY_METASTORE} -eq 0 ]; then
   # Starts a HiveServer2 instance on the port specified by the HIVE_SERVER2_THRIFT_PORT
   # environment variable. HADOOP_HEAPSIZE should be set to at least 2048 to avoid OOM
   # when loading ORC tables like widerow.
-  if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-    HADOOP_HEAPSIZE="2048" hive --service hiveserver2 > ${LOGDIR}/hive-server2.out 2>&1 &
-  elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-    HADOOP_CLIENT_OPTS="-Xmx2048m -Dhive.log.file=hive-server2.log" hive \
+  HADOOP_CLIENT_OPTS="-Xmx2048m -Dhive.log.file=hive-server2.log" hive \
       --service hiveserver2 > ${LOGDIR}/hive-server2.out 2>&1 &
-  fi
 
   # Wait for the HiveServer2 service to come up because callers of this script
   # may rely on it being available.

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/bin/run-mini-dfs.sh
----------------------------------------------------------------------
diff --git a/testdata/bin/run-mini-dfs.sh b/testdata/bin/run-mini-dfs.sh
index aefa703..ea6c519 100755
--- a/testdata/bin/run-mini-dfs.sh
+++ b/testdata/bin/run-mini-dfs.sh
@@ -40,9 +40,6 @@ fi
 set +e
 $IMPALA_HOME/testdata/cluster/admin start_cluster
 if [[ $? != 0 ]]; then
-  # Don't issue Java version warning when not running Hadoop 3.
-  [[ $IMPALA_MINICLUSTER_PROFILE != 3 ]] && exit 1
-
   # Only issue Java version warning when running Java 7.
   $JAVA -version 2>&1 | grep -q 'java version "1.7' || exit 1
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
----------------------------------------------------------------------
diff --git a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
index 8cde0b6..51197c5 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
+++ b/testdata/cluster/node_templates/common/etc/init.d/common.tmpl
@@ -19,20 +19,14 @@ NODE_DIR="${NODE_DIR}"
 PID_DIR="$NODE_DIR/var/run"
 LOG_DIR="$NODE_DIR/var/log"
 
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HADOOP_PID_DIR=$NODE_DIR/var/run
-fi
 export HADOOP_CONF_DIR="$NODE_DIR/etc/hadoop/conf"
-if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-  export YARN_CONF_DIR="$HADOOP_CONF_DIR"
-fi
+export HADOOP_PID_DIR=$NODE_DIR/var/run
 
 # Mark each process so they can be killed if needed. This is a safety mechanism for
 # stopping the processes if the pid file has been removed for whatever reason.
 export HADOOP_OPTS+=" -D${KILL_CLUSTER_MARKER}"
-if [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export YARN_OPTS+=" -D${KILL_CLUSTER_MARKER}"
-fi
+export YARN_OPTS+=" -D${KILL_CLUSTER_MARKER}"
+
 # This is for KMS.
 export CATALINA_OPTS+=" -D${KILL_CLUSTER_MARKER}"
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/cluster/node_templates/common/etc/init.d/yarn-common
----------------------------------------------------------------------
diff --git a/testdata/cluster/node_templates/common/etc/init.d/yarn-common b/testdata/cluster/node_templates/common/etc/init.d/yarn-common
index dc60971..9934307 100644
--- a/testdata/cluster/node_templates/common/etc/init.d/yarn-common
+++ b/testdata/cluster/node_templates/common/etc/init.d/yarn-common
@@ -15,14 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 
-if [[ $IMPALA_MINICLUSTER_PROFILE == 2 ]]; then
-  export YARN_LOG_DIR="$LOG_DIR/hadoop-yarn"
-  export YARN_ROOT_LOGGER="${YARN_ROOT_LOGGER:-DEBUG,RFA}"
-
-  export YARN_LOGFILE=$(basename $0).log
-elif [[ $IMPALA_MINICLUSTER_PROFILE == 3 ]]; then
-  export HADOOP_LOG_DIR="$LOG_DIR/hadoop-yarn"
-  export HADOOP_ROOT_LOGGER="${HADOOP_ROOT_LOGGER:-DEBUG,RFA}"
-
-  export HADOOP_LOGFILE=$(basename $0).log
-fi
+export HADOOP_LOG_DIR="$LOG_DIR/hadoop-yarn"
+export HADOOP_ROOT_LOGGER="${HADOOP_ROOT_LOGGER:-DEBUG,RFA}"
+export HADOOP_LOGFILE=$(basename $0).log

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/pom.xml
----------------------------------------------------------------------
diff --git a/testdata/pom.xml b/testdata/pom.xml
index af43fe9..22bf270 100644
--- a/testdata/pom.xml
+++ b/testdata/pom.xml
@@ -183,27 +183,6 @@ under the License.
           <redirectTestOutputToFile>true</redirectTestOutputToFile>
         </configuration>
       </plugin>
-
-      <!-- Support different src dirs for different minicluster profiles. -->
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <version>1.5</version>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.basedir}/src/compat-minicluster-profile-${env.IMPALA_MINICLUSTER_PROFILE}/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
     </plugins>
   </build>
 

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
----------------------------------------------------------------------
diff --git a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
index d35b4bf..f90d9b4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
@@ -76,10 +76,10 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col limit 10 offset 5
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ====
 ---- CREATE_VIEW
 # Test that creating a view in Impala with "NULLS FIRST/LAST" works when the nulls
@@ -90,7 +90,7 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col asc nulls last limit 10
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
 HIVE=SUCCESS
@@ -103,10 +103,10 @@ select id, int_col, string_col from functional.alltypesagg
 order by int_col desc nulls last limit 10
 ---- CREATE_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 IMPALA=SUCCESS
-HIVE=SUCCESS_PROFILE_3_ONLY
+HIVE=SUCCESS
 ====
 ---- CREATE_VIEW
 # Test that exotic column names are quoted in

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/tests/common/environ.py
----------------------------------------------------------------------
diff --git a/tests/common/environ.py b/tests/common/environ.py
index fc863dc..b61da1b 100644
--- a/tests/common/environ.py
+++ b/tests/common/environ.py
@@ -181,6 +181,3 @@ def specific_build_type_timeout(
     timeout_val = default_timeout
   return timeout_val
 
-def is_hive_2():
-  """Returns True if IMPALA_MINICLUSTER_PROFILE in use provides Hive 2."""
-  return os.environ.get("IMPALA_MINICLUSTER_PROFILE", None) == "3"

http://git-wip-us.apache.org/repos/asf/impala/blob/a203733f/tests/metadata/test_views_compatibility.py
----------------------------------------------------------------------
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index 11227dd..f4987c3 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -22,7 +22,6 @@ import shlex
 from subprocess import call
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
-from tests.common.environ import is_hive_2
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfS3, SkipIfADLS, SkipIfIsilon, SkipIfLocal
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -254,8 +253,6 @@ class ViewCompatTestCase(object):
       component_value = components[2].upper()
       if component_value == 'SUCCESS':
         exp_res[components[0]] = True
-      elif component_value == 'SUCCESS_PROFILE_3_ONLY':
-        exp_res[components[0]] = is_hive_2()
       elif component_value == 'FAILURE':
         exp_res[components[0]] = False
       else: