Posted to commits@impala.apache.org by bo...@apache.org on 2019/05/28 10:26:09 UTC

[impala] branch master updated (9ee4a5e -> 5ce57ca)

This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 9ee4a5e  acid: Filter unwanted files based on ACID state.
     new 2750f0a  IMPALA-8400: Implement Ranger audit event handler
     new 6839d97  IMPALA-8524: part2: Avoid calling "hive" via command line in EE tests
     new 90da7fa  [DOCS] Corrected the privileges required for CREATE FUNCTION
     new 377471f  IMPALA-8248: Improve Ranger test coverage
     new b00d031  IMPALA-6903: Download profile from WebUI in text format
     new 31195eb  IMPALA-8473: Publish lineage info via hook
     new 5faf174  IMPALA-8585: Fix for upgraded + compacted acid tables
     new 5ce57ca  IMPALA-8369: Add HIVE_MAJOR_VERSION section to planner tests + some fixes

The 8 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/service/frontend.cc                         |   7 +
 be/src/service/frontend.h                          |   5 +
 be/src/service/impala-http-handler.cc              |  19 +-
 be/src/service/impala-http-handler.h               |   9 +
 be/src/service/impala-server.cc                    |  57 +-
 be/src/service/impala-server.h                     |   6 +
 be/src/util/backend-gflag-util.cc                  |   4 +
 common/thrift/BackendGflags.thrift                 |   4 +
 common/thrift/Frontend.thrift                      |  10 +
 docs/topics/impala_create_function.xml             |  35 +-
 docs/topics/impala_udf.xml                         |  33 +-
 .../apache/impala/analysis/AnalysisContext.java    |   9 +-
 .../impala/authorization/AuthorizationChecker.java |  16 +-
 ...zationConfig.java => AuthorizationContext.java} |  20 +-
 .../authorization/BaseAuthorizationChecker.java    |  64 +-
 .../authorization/NoopAuthorizationFactory.java    |   9 +-
 .../ranger/RangerAuthorizationChecker.java         | 101 ++-
 ...Plugin.java => RangerAuthorizationContext.java} |  18 +-
 .../ranger/RangerBufferAuditHandler.java           | 117 ++++
 .../ranger/RangerCatalogdAuthorizationManager.java |  12 +-
 .../ranger/RangerImpaladAuthorizationManager.java  |   4 +-
 .../sentry/SentryAuthorizationChecker.java         |   9 +-
 .../apache/impala/hooks/QueryCompleteContext.java  |  56 ++
 .../org/apache/impala/hooks/QueryEventHook.java    | 116 ++++
 .../apache/impala/hooks/QueryEventHookManager.java | 229 +++++++
 .../org/apache/impala/service/BackendConfig.java   |   8 +
 .../java/org/apache/impala/service/Frontend.java   |  71 +-
 .../org/apache/impala/service/JniFrontend.java     |  43 +-
 .../java/org/apache/impala/util/AcidUtils.java     |   8 +-
 .../authorization/AuthorizationStmtTest.java       | 724 +-------------------
 .../impala/authorization/AuthorizationTest.java    |   2 -
 .../authorization/AuthorizationTestBase.java       | 727 +++++++++++++++++++++
 .../authorization/ranger/RangerAuditLogTest.java   | 196 ++++++
 .../org/apache/impala/common/FrontendTestBase.java |  10 +-
 .../impala/hooks/QueryEventHookManagerTest.java    | 146 +++++
 .../org/apache/impala/planner/PlannerTestBase.java |  12 +
 .../impala/testutil/AlwaysErrorQueryEventHook.java |  23 +-
 .../impala/testutil/CountingQueryEventHook.java    |  52 ++
 .../impala/testutil/DummyQueryEventHook.java       |  53 ++
 .../impala/testutil/PostQueryErrorEventHook.java   |  20 +-
 .../org/apache/impala/testutil/TestFileParser.java |   3 +-
 .../java/org/apache/impala/util/AcidUtilsTest.java |  49 +-
 .../queries/PlannerTest/resource-requirements.test | 140 ++++
 .../queries/QueryTest/acid-compaction.test         |  31 +
 .../functional-query/queries/QueryTest/acid.test   |  18 +
 .../QueryTest/views-compatibility-hive2-only.test  |  30 +
 .../QueryTest/views-compatibility-hive3-only.test  |  15 +
 .../queries/QueryTest/views-compatibility.test     |  27 -
 tests/authorization/test_provider.py               |   4 +-
 tests/authorization/test_ranger.py                 | 175 +++++
 tests/custom_cluster/test_query_event_hooks.py     | 202 ++++++
 tests/metadata/test_views_compatibility.py         |  15 +-
 tests/query_test/test_acid.py                      |   1 -
 tests/query_test/test_scanners.py                  |   4 +-
 tests/webserver/test_web_pages.py                  |  22 +
 www/query_profile.tmpl                             |  10 +-
 56 files changed, 2871 insertions(+), 939 deletions(-)
 copy fe/src/main/java/org/apache/impala/authorization/{AuthorizationConfig.java => AuthorizationContext.java} (71%)
 copy fe/src/main/java/org/apache/impala/authorization/ranger/{RangerImpalaPlugin.java => RangerAuthorizationContext.java} (58%)
 create mode 100644 fe/src/main/java/org/apache/impala/authorization/ranger/RangerBufferAuditHandler.java
 create mode 100644 fe/src/main/java/org/apache/impala/hooks/QueryCompleteContext.java
 create mode 100644 fe/src/main/java/org/apache/impala/hooks/QueryEventHook.java
 create mode 100644 fe/src/main/java/org/apache/impala/hooks/QueryEventHookManager.java
 create mode 100644 fe/src/test/java/org/apache/impala/authorization/AuthorizationTestBase.java
 create mode 100644 fe/src/test/java/org/apache/impala/authorization/ranger/RangerAuditLogTest.java
 create mode 100644 fe/src/test/java/org/apache/impala/hooks/QueryEventHookManagerTest.java
 copy common/yarn-extras/src/main/java/org/apache/impala/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java => fe/src/test/java/org/apache/impala/testutil/AlwaysErrorQueryEventHook.java (60%)
 create mode 100644 fe/src/test/java/org/apache/impala/testutil/CountingQueryEventHook.java
 create mode 100644 fe/src/test/java/org/apache/impala/testutil/DummyQueryEventHook.java
 copy common/yarn-extras/src/main/java/org/apache/impala/yarn/server/resourcemanager/resource/ResourceWeights.java => fe/src/test/java/org/apache/impala/testutil/PostQueryErrorEventHook.java (63%)
 create mode 100644 testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive2-only.test
 create mode 100644 testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive3-only.test
 create mode 100644 tests/custom_cluster/test_query_event_hooks.py


[impala] 03/08: [DOCS] Corrected the privileges required for CREATE FUNCTION

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 90da7fa48a3f3d7b781172d91dfa6026c5d7fab8
Author: Alex Rodoni <ar...@cloudera.com>
AuthorDate: Thu May 23 17:44:45 2019 -0700

    [DOCS] Corrected the privileges required for CREATE FUNCTION
    
    Change-Id: I51c8ec9d794e3364e700193c5a8f108ef270ac62
    Reviewed-on: http://gerrit.cloudera.org:8080/13420
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Fredy Wijaya <fw...@cloudera.com>
---
 docs/topics/impala_create_function.xml | 35 ++++++++++------------------------
 docs/topics/impala_udf.xml             | 33 ++++++++------------------------
 2 files changed, 18 insertions(+), 50 deletions(-)

diff --git a/docs/topics/impala_create_function.xml b/docs/topics/impala_create_function.xml
index 1bdf494..9861fd9 100644
--- a/docs/topics/impala_create_function.xml
+++ b/docs/topics/impala_create_function.xml
@@ -37,11 +37,9 @@ under the License.
 
   <conbody>
 
-    <p>
-      <indexterm audience="hidden">CREATE FUNCTION statement</indexterm>
-      Creates a user-defined function (UDF), which you can use to implement custom logic during
-      <codeph>SELECT</codeph> or <codeph>INSERT</codeph> operations.
-    </p>
+    <p>  Creates a user-defined function (UDF), which you can use to implement
+      custom logic during <codeph>SELECT</codeph> or <codeph>INSERT</codeph>
+      operations. </p>
 
     <p conref="../shared/impala_common.xml#common/syntax_blurb"/>
 
@@ -74,14 +72,6 @@ under the License.
   SYMBOL='<varname>class_name</varname>'</codeblock>
     </p>
 
-<!--
-Examples:
-CREATE FUNCTION IF NOT EXISTS foo location '/path/to/jar' SYMBOL='TestUdf';
-CREATE FUNCTION bar location '/path/to/jar' SYMBOL='TestUdf2';
-DROP FUNCTION foo;
-DROP FUNCTION IF EXISTS bar;
--->
-
     <p>
       To create a persistent UDA, which must be written in C++, issue a <codeph>CREATE AGGREGATE FUNCTION</codeph> statement:
     </p>
@@ -212,18 +202,6 @@ DROP FUNCTION IF EXISTS bar;
       determine the names based on the first such clause, so the others are optional.
     </p>
 
-    <p audience="hidden">
-      The <codeph>INTERMEDIATE</codeph> clause specifies the data type of intermediate values passed from the
-      <q>update</q> phase to the <q>merge</q> phase, and from the <q>merge</q> phase to the <q>finalize</q> phase.
-      You can use any of the existing Impala data types, or the special notation
-      <codeph>CHAR(<varname>n</varname>)</codeph> to allocate a scratch area of <varname>n</varname> bytes for the
-      intermediate result. For example, if the different phases of your UDA pass strings to each other but in the
-      end the function returns a <codeph>BIGINT</codeph> value, you would specify <codeph>INTERMEDIATE
-      STRING</codeph>. Likewise, if the different phases of your UDA pass 2 separate <codeph>BIGINT</codeph> values
-      between them (8 bytes each), you would specify <codeph>INTERMEDIATE CHAR(16)</codeph> so that each function
-      could read from and write to a 16-byte buffer.
-    </p>
-
     <p>
       For end-to-end examples of UDAs, see <xref href="impala_udf.xml#udfs"/>.
     </p>
@@ -235,6 +213,13 @@ DROP FUNCTION IF EXISTS bar;
     <p conref="../shared/impala_common.xml#common/usage_notes_blurb"/>
 
     <ul>
+      <li> When authorization is enabled, the <codeph>CREATE FUNCTION</codeph>
+        statement requires:<ul>
+          <li>The <codeph>CREATE</codeph> privilege on the database.</li>
+          <li>The <codeph>ALL</codeph> privilege on the URI, where the URI is
+            the value you specified for the <codeph>LOCATION</codeph> clause
+            in the <codeph>CREATE FUNCTION</codeph> statement.</li>
+        </ul></li>
       <li>
         You can write Impala UDFs in either C++ or Java. C++ UDFs are new to Impala, and are the recommended format
         for high performance utilizing native code. Java-based UDFs are compatible between Impala and Hive, and are
diff --git a/docs/topics/impala_udf.xml b/docs/topics/impala_udf.xml
index 4811643..8d6c382 100644
--- a/docs/topics/impala_udf.xml
+++ b/docs/topics/impala_udf.xml
@@ -45,13 +45,8 @@ under the License.
       copying from one table to another with the <codeph>INSERT ... SELECT</codeph> syntax.
     </p>
 
-    <p>
-      You might be familiar with this feature from other database products, under names such as stored functions or
-      stored routines.
-<!--
-    , user-defined aggregate functions (UDAFs), table functions, or window functions.
-    -->
-    </p>
+    <p> You might be familiar with this feature from other database products,
+      under names such as stored functions or stored routines.  </p>
 
     <p>
       Impala support for UDFs is available in Impala 1.2 and higher:
@@ -137,18 +132,6 @@ select most_profitable_location(store_id, sales, expenses, tax_rate, depreciatio
             Currently, Impala does not support other categories of user-defined functions, such as user-defined
             table functions (UDTFs) or window functions.
           </li>
-
-<!--
-<li>
-A user-defined table function (UDTF) returns an arbitrary number of rows (zero, one, or many) for each input row.
-These functions filter, explode, or transform the input data in a variety of ways.
-Currently, Impala does not support UDTFs.
-For example:
-<codeblock>select anomalous_event() from web_traffic;
-select price_change() from stock_ticker;
-select real_words(letters) from word_games;</codeblock>
-</li>
--->
         </ul>
       </conbody>
     </concept>
@@ -1860,12 +1843,12 @@ Returned 2 row(s) in 0.43s</codeblock>
           To call a UDF in a query, you must have the required read privilege for any databases and tables used in
           the query.
         </li>
-
-        <li>
-          Because incorrectly coded UDFs could cause performance or capacity problems, for example by going into
-          infinite loops or allocating excessive amounts of memory, only an administrative user can create UDFs.
-          That is, to execute the <codeph>CREATE FUNCTION</codeph> statement requires the <codeph>ALL</codeph>
-          privilege on the server.
+        <li> The <codeph>CREATE FUNCTION</codeph> statement requires:<ul>
+            <li>The <codeph>CREATE</codeph> privilege on the database.</li>
+            <li>The <codeph>ALL</codeph> privilege on the URI, where the URI is
+              the value you specified for the <codeph>LOCATION</codeph> clause
+              in the <codeph>CREATE FUNCTION</codeph> statement.</li>
+          </ul>
         </li>
       </ul>
 


[impala] 06/08: IMPALA-8473: Publish lineage info via hook

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 31195eb8119ac6a557486a10dc24692bb0202f85
Author: Radford Nguyen <ra...@cloudera.com>
AuthorDate: Wed May 15 19:59:21 2019 -0700

    IMPALA-8473: Publish lineage info via hook
    
    This commit introduces a hook mechanism for publishing
    lineage data specifically, and query information more
    generally, from Impala.
    
    The legacy behavior of writing the lineage file is
    being retained but deprecated.
    
    Hooks can be implemented by downstream consumers (i.e.
    runtime dependencies) to hook into supported places during
    Impala query execution:
    
    - impalad startup
    - query completion
        - see IMPALA-8572 for caveat/details
    
    The consumers are frontend Java dependencies
    instantiated at runtime. Two backend flags configure this
    behavior:
    
    - `query_event_hook_classes` specifies a comma-separated
    list of hook implementation classes that are instantiated
    and registered at Impala startup.
    
    - `query_event_hook_nthreads` specifies the number of
    threads to use for asynchronous hook execution. (Relevant
    if multiple hooks are registered.)
    
    Lineage information is passed from the backend after
    a query completes (but before it returns) and given
    to every hook to execute asynchronously.  In other words,
    a query may complete and return to the user before any
    or all hooks have completed executing.  An exception
    during hook on-query-complete execution will simply be logged
    and will not be (directly) fatal to the system.
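    
    For illustration only, a minimal hook might look like the
    following sketch (the class com.example.LoggingHook is a
    hypothetical name, not part of this change):
    
        package com.example;
        
        import org.apache.impala.hooks.QueryCompleteContext;
        import org.apache.impala.hooks.QueryEventHook;
        
        // Registered via --query_event_hook_classes=com.example.LoggingHook
        public class LoggingHook implements QueryEventHook {
          @Override
          public void onImpalaStartup() {
            // Runs synchronously during impalad startup; keep this fast.
          }
        
          @Override
          public void onQueryComplete(QueryCompleteContext context) {
            // Invoked asynchronously after a query completes; exceptions
            // thrown here are logged and ignored by Impala.
            System.out.println(context.getLineageGraph());
          }
        }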
    
    Tests:
    - added unit tests for FE hook execution
    - added E2E tests for hook configuration, execution, error
    - ran full build, tests
    
    Change-Id: I23a896537a98bfef07fb27c70e9a87c105cd77a1
    Reviewed-on: http://gerrit.cloudera.org:8080/13352
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/service/frontend.cc                         |   7 +
 be/src/service/frontend.h                          |   5 +
 be/src/service/impala-server.cc                    |  57 ++++-
 be/src/service/impala-server.h                     |   6 +
 be/src/util/backend-gflag-util.cc                  |   4 +
 common/thrift/BackendGflags.thrift                 |   4 +
 common/thrift/Frontend.thrift                      |  10 +
 .../apache/impala/hooks/QueryCompleteContext.java  |  56 +++++
 .../org/apache/impala/hooks/QueryEventHook.java    | 116 +++++++++++
 .../apache/impala/hooks/QueryEventHookManager.java | 229 +++++++++++++++++++++
 .../org/apache/impala/service/BackendConfig.java   |   8 +
 .../java/org/apache/impala/service/Frontend.java   |  71 ++++++-
 .../org/apache/impala/service/JniFrontend.java     |  43 ++--
 .../impala/hooks/QueryEventHookManagerTest.java    | 146 +++++++++++++
 .../impala/testutil/AlwaysErrorQueryEventHook.java |  33 +++
 .../impala/testutil/CountingQueryEventHook.java    |  52 +++++
 .../impala/testutil/DummyQueryEventHook.java       |  53 +++++
 .../impala/testutil/PostQueryErrorEventHook.java   |  32 +++
 tests/authorization/test_provider.py               |   4 +-
 tests/custom_cluster/test_query_event_hooks.py     | 202 ++++++++++++++++++
 20 files changed, 1112 insertions(+), 26 deletions(-)

diff --git a/be/src/service/frontend.cc b/be/src/service/frontend.cc
index 616d8e4..6ef000a 100644
--- a/be/src/service/frontend.cc
+++ b/be/src/service/frontend.cc
@@ -109,6 +109,7 @@ Frontend::Frontend() {
     {"getTableFiles", "([B)[B", &get_table_files_id_},
     {"showCreateFunction", "([B)Ljava/lang/String;", &show_create_function_id_},
     {"buildTestDescriptorTable", "([B)[B", &build_test_descriptor_table_id_},
+    {"callQueryCompleteHooks", "([B)V", &call_query_complete_hooks_id_}
   };
 
   JNIEnv* jni_env = JniUtil::GetJNIEnv();
@@ -293,3 +294,9 @@ Status Frontend::BuildTestDescriptorTable(const TBuildTestDescriptorTableParams&
     TDescriptorTable* result) {
   return JniUtil::CallJniMethod(fe_, build_test_descriptor_table_id_, params, result);
 }
+
+// Call FE post-query execution hook
+Status Frontend::CallQueryCompleteHooks(const TQueryCompleteContext& context) {
+  return JniUtil::CallJniMethod(fe_, call_query_complete_hooks_id_, context);
+}
+
diff --git a/be/src/service/frontend.h b/be/src/service/frontend.h
index abcc6c3..f063fce 100644
--- a/be/src/service/frontend.h
+++ b/be/src/service/frontend.h
@@ -24,6 +24,7 @@
 #include "gen-cpp/ImpalaHiveServer2Service.h"
 #include "gen-cpp/ImpalaInternalService.h"
 #include "gen-cpp/Frontend_types.h"
+#include "gen-cpp/LineageGraph_types.h"
 #include "common/status.h"
 
 namespace impala {
@@ -185,6 +186,9 @@ class Frontend {
   Status BuildTestDescriptorTable(const TBuildTestDescriptorTableParams& params,
       TDescriptorTable* result);
 
+  // Call FE post-query execution hook
+  Status CallQueryCompleteHooks(const TQueryCompleteContext& context);
+
  private:
   jobject fe_;  // instance of org.apache.impala.service.JniFrontend
   jmethodID create_exec_request_id_;  // JniFrontend.createExecRequest()
@@ -213,6 +217,7 @@ class Frontend {
   jmethodID wait_for_catalog_id_; // JniFrontend.waitForCatalog
   jmethodID get_table_files_id_; // JniFrontend.getTableFiles
   jmethodID show_create_function_id_; // JniFrontend.showCreateFunction
+  jmethodID call_query_complete_hooks_id_; // JniFrontend.callQueryCompleteHooks
 
   // Only used for testing.
   jmethodID build_test_descriptor_table_id_; // JniFrontend.buildTestDescriptorTable()
diff --git a/be/src/service/impala-server.cc b/be/src/service/impala-server.cc
index d584f6f..f8d25b5 100644
--- a/be/src/service/impala-server.cc
+++ b/be/src/service/impala-server.cc
@@ -92,6 +92,7 @@
 #include "gen-cpp/ImpalaService_types.h"
 #include "gen-cpp/ImpalaInternalService.h"
 #include "gen-cpp/LineageGraph_types.h"
+#include "gen-cpp/Frontend_types.h"
 
 #include "common/names.h"
 
@@ -256,6 +257,14 @@ DEFINE_int64(accepted_client_cnxn_timeout, 300000,
     "the post-accept, pre-setup connection queue before it is timed out and the "
     "connection request is rejected. A value of 0 means there is no timeout.");
 
+DEFINE_string(query_event_hook_classes, "", "Comma-separated list of java QueryEventHook "
+    "implementation classes to load and register at Impala startup. Class names should "
+    "be fully-qualified and on the classpath. Whitespace acceptable around delimiters.");
+
+DEFINE_int32(query_event_hook_nthreads, 1, "Number of threads to use for "
+    "QueryEventHook execution. If this number is >1 then hooks will execute "
+    "concurrently.");
+
 DECLARE_bool(compact_catalog_topic);
 
 namespace impala {
@@ -485,17 +494,42 @@ Status ImpalaServer::LogLineageRecord(const ClientRequestState& client_request_s
   // Set the query end time in TLineageGraph. Must use UNIX time directly rather than
   // e.g. converting from client_request_state.end_time() (IMPALA-4440).
   lineage_graph.__set_ended(UnixMillis() / 1000);
+
   string lineage_record;
   LineageUtil::TLineageToJSON(lineage_graph, &lineage_record);
-  const Status& status = lineage_logger_->AppendEntry(lineage_record);
-  if (!status.ok()) {
-    LOG(ERROR) << "Unable to record query lineage record: " << status.GetDetail();
-    if (FLAGS_abort_on_failed_lineage_event) {
-      CLEAN_EXIT_WITH_ERROR("Shutting down Impala Server due to "
-          "abort_on_failed_lineage_event=true");
+
+  if (AreQueryHooksEnabled()) {
+    // invoke QueryEventHooks
+    TQueryCompleteContext query_complete_context;
+    query_complete_context.__set_lineage_string(lineage_record);
+    const Status& status = exec_env_->frontend()->CallQueryCompleteHooks(
+        query_complete_context);
+
+    if (!status.ok()) {
+      LOG(ERROR) << "Failed to send query lineage info to FE CallQueryCompleteHooks"
+                 << status.GetDetail();
+      if (FLAGS_abort_on_failed_lineage_event) {
+        CLEAN_EXIT_WITH_ERROR("Shutting down Impala Server due to "
+            "abort_on_failed_lineage_event=true");
+      }
     }
   }
-  return status;
+
+  // lineage logfile writing is deprecated in favor of the
+  // QueryEventHooks (see FE).  this behavior is being retained
+  // for now but may be removed in the future.
+  if (IsLineageLoggingEnabled()) {
+    const Status& status = lineage_logger_->AppendEntry(lineage_record);
+    if (!status.ok()) {
+      LOG(ERROR) << "Unable to record query lineage record: " << status.GetDetail();
+      if (FLAGS_abort_on_failed_lineage_event) {
+        CLEAN_EXIT_WITH_ERROR("Shutting down Impala Server due to "
+            "abort_on_failed_lineage_event=true");
+      }
+    }
+    return status;
+  }
+  return Status::OK();
 }
 
 bool ImpalaServer::IsCoordinator() { return is_coordinator_; }
@@ -529,6 +563,10 @@ bool ImpalaServer::IsLineageLoggingEnabled() {
   return !FLAGS_lineage_event_log_dir.empty();
 }
 
+bool ImpalaServer::AreQueryHooksEnabled() {
+  return !FLAGS_query_event_hook_classes.empty();
+}
+
 Status ImpalaServer::InitLineageLogging() {
   if (!IsLineageLoggingEnabled()) {
     LOG(INFO) << "Lineage logging is disabled";
@@ -673,7 +711,9 @@ void ImpalaServer::LogQueryEvents(const ClientRequestState& request_state) {
     // TODO: deal with an error status
     discard_result(LogAuditRecord(request_state, request_state.exec_request()));
   }
-  if (IsLineageLoggingEnabled() && log_events) {
+
+  if (log_events &&
+      (AreQueryHooksEnabled() || IsLineageLoggingEnabled())) {
     // TODO: deal with an error status
     discard_result(LogLineageRecord(request_state));
   }
@@ -1217,6 +1257,7 @@ Status ImpalaServer::UnregisterQuery(const TUniqueId& query_id, bool check_infli
       ImpaladMetrics::QUERY_DURATIONS->Update(duration_ms);
     }
   }
+  // TODO (IMPALA-8572): move LogQueryEvents to before query unregistration
   LogQueryEvents(*request_state.get());
 
   {
diff --git a/be/src/service/impala-server.h b/be/src/service/impala-server.h
index 5987de0..b23a518 100644
--- a/be/src/service/impala-server.h
+++ b/be/src/service/impala-server.h
@@ -377,8 +377,14 @@ class ImpalaServer : public ImpalaServiceIf,
   void WaitForCatalogUpdateTopicPropagation(const TUniqueId& catalog_service_id);
 
   /// Returns true if lineage logging is enabled, false otherwise.
+  ///
+  /// DEPRECATED: lineage file logging has been deprecated in favor of
+  ///             query execution hooks (FE)
   bool IsLineageLoggingEnabled();
 
+  /// Returns true if query execution (FE) hooks are enabled, false otherwise.
+  bool AreQueryHooksEnabled();
+
   /// Returns true if this is a coordinator, false otherwise.
   bool IsCoordinator();
 
diff --git a/be/src/util/backend-gflag-util.cc b/be/src/util/backend-gflag-util.cc
index e6b1570..99b2926 100644
--- a/be/src/util/backend-gflag-util.cc
+++ b/be/src/util/backend-gflag-util.cc
@@ -77,6 +77,8 @@ DECLARE_string(ranger_service_type);
 DECLARE_string(ranger_app_id);
 DECLARE_string(authorization_provider);
 DECLARE_bool(recursively_list_partitions);
+DECLARE_string(query_event_hook_classes);
+DECLARE_int32(query_event_hook_nthreads);
 
 namespace impala {
 
@@ -153,6 +155,8 @@ Status GetThriftBackendGflags(JNIEnv* jni_env, jbyteArray* cfg_bytes) {
   cfg.__set_ranger_app_id(FLAGS_ranger_app_id);
   cfg.__set_authorization_provider(FLAGS_authorization_provider);
   cfg.__set_recursively_list_partitions(FLAGS_recursively_list_partitions);
+  cfg.__set_query_event_hook_classes(FLAGS_query_event_hook_classes);
+  cfg.__set_query_event_hook_nthreads(FLAGS_query_event_hook_nthreads);
   RETURN_IF_ERROR(SerializeThriftMsg(jni_env, &cfg, cfg_bytes));
   return Status::OK();
 }
diff --git a/common/thrift/BackendGflags.thrift b/common/thrift/BackendGflags.thrift
index c242c3e..6574faf 100644
--- a/common/thrift/BackendGflags.thrift
+++ b/common/thrift/BackendGflags.thrift
@@ -129,4 +129,8 @@ struct TBackendGflags {
   51: required string authorization_provider
 
   52: required bool recursively_list_partitions
+
+  53: required string query_event_hook_classes
+
+  54: required i32 query_event_hook_nthreads
 }
diff --git a/common/thrift/Frontend.thrift b/common/thrift/Frontend.thrift
index 2599910..fe26ac6 100644
--- a/common/thrift/Frontend.thrift
+++ b/common/thrift/Frontend.thrift
@@ -932,3 +932,13 @@ struct TTestCaseData {
   // underlying thrift layout changes.
   5: required string impala_version
 }
+
+// Information about a query sent to the FE QueryEventHooks
+// after query execution
+struct TQueryCompleteContext {
+  // the serialized lineage graph of the query, with optional BE-populated information
+  //
+  // this is an experimental feature and the format will likely change
+  // in a future version
+  1: required string lineage_string
+}
diff --git a/fe/src/main/java/org/apache/impala/hooks/QueryCompleteContext.java b/fe/src/main/java/org/apache/impala/hooks/QueryCompleteContext.java
new file mode 100644
index 0000000..23ea562
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/hooks/QueryCompleteContext.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.hooks;
+
+import java.util.Objects;
+
+/**
+ * {@link QueryCompleteContext} encapsulates immutable information sent from the
+ * BE to a post-query hook.
+ */
+public class QueryCompleteContext {
+  private final String lineageGraph_;
+
+  public QueryCompleteContext(String lineageGraph) {
+    lineageGraph_ = Objects.requireNonNull(lineageGraph);
+  }
+
+  /**
+   * Returns the lineage graph sent from the backend during
+   * {@link QueryEventHook#onQueryComplete(QueryCompleteContext)}.  This graph
+   * object will generally contain more information than it did when it was
+   * first constructed in the frontend, because the backend will have filled
+   * in additional information.
+   * <p>
+   * The returned object is a JSON representation of the lineage graph object
+   * for the query.  The details of the JSON translation are not provided here
+   * as this is meant to be a temporary feature, and the String format will
+   * be changed to something more strongly-typed in the future.
+   * </p>
+   *
+   * @return lineage graph from the query that executed
+   */
+  public String getLineageGraph() { return lineageGraph_; }
+
+  @Override
+  public String toString() {
+    return "QueryCompleteContext{" +
+        "lineageGraph='" + lineageGraph_ + '\'' +
+        '}';
+  }
+}
diff --git a/fe/src/main/java/org/apache/impala/hooks/QueryEventHook.java b/fe/src/main/java/org/apache/impala/hooks/QueryEventHook.java
new file mode 100644
index 0000000..80ee5a5
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/hooks/QueryEventHook.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.hooks;
+
+/**
+ * {@link QueryEventHook} is the interface for implementations that
+ * can hook into supported events in Impala query execution.
+ */
+public interface QueryEventHook {
+  /**
+   * Hook method invoked when the Impala daemon starts up.
+   * <p>
+   * This method will block completion of daemon startup, so you should
+   * execute any long-running actions asynchronously.
+   * </p>
+   * <h3>Error-Handling</h3>
+   * <p>
+   * Any {@link Exception} thrown from this method will effectively fail
+   * Impala startup with an error. Implementations should handle all
+   * exceptions as gracefully as they can, even if the end result is to
+   * throw them.
+   * </p>
+   */
+  void onImpalaStartup();
+
+  /**
+   * Hook method invoked asynchronously when a (qualifying) Impala query
+   * has executed, but before it has returned.
+   * <p>
+   * This method will not block the invoking or subsequent queries,
+   * but may block future hook invocations if it runs for too long.
+   * </p>
+   * <h3>Error-Handling</h3>
+   * <p>
+   * Any {@link Throwable} thrown from this method will only be caught
+   * and logged and will not affect the result of any query.  Hook implementations
+   * should make a best-effort to handle their own exceptions.
+   * </p>
+   * <h3>Important:</h3>
+   * <p>
+   * This hook is actually invoked when the query is <i>unregistered</i>,
+   * which may happen a long time after the query has executed.
+   * e.g. the following sequence is possible:
+   * <ol>
+   *  <li>User executes query from Hue.
+   *  <li>User goes home for weekend, leaving Hue tab open in browser
+   *  <li>If we're lucky, the session timeout expires after some amount of idle time.
+   *  <li>The query gets unregistered, lineage record gets logged
+   * </ol>
+   * </p>
+   * <h3>Service Guarantees</h3>
+   *
+   * Impala makes the following guarantees about how this method is executed
+   * with respect to other implementations that may be registered:
+   *
+   * <h4>Hooks are executed asynchronously</h4>
+   *
+   * All hook execution happens asynchronously of the query that triggered
+   * them.  Hooks may still be executing after the query response has returned
+   * to the caller.  Additionally, hooks may execute concurrently if the
+   * hook executor thread size is configured appropriately.
+   *
+   * <h4>Hook Invocation is in Configuration Order</h4>
+   *
+   * The <i>submission</i> of the hook execution tasks occurs in the order
+   * that the hooks were defined in configuration.  This generally means that
+   * hooks will <i>start</i> executing in order, but there are no guarantees
+   * about finishing order.
+   * <p>
+   * For example, if configured with {@code query_event_hook_classes=hook1,hook2,hook3},
+   * then hook1 will start before hook2, and hook2 will start before hook3.
+   * If you need to guarantee that hook1 <i>completes</i> before hook2 starts, then
+   * you should specify {@code query_event_hook_nthreads=1} for serial hook
+   * execution.
+   * </p>
+   *
+   * <h4>Hook Execution Blocks</h4>
+   *
+   * A hook will block the thread it executes on until it completes.  If a hook hangs,
+   * then the thread also hangs.  Impala (currently) will not check for hanging hooks to
+   * take any action.  This means that if you have {@code query_event_hook_nthreads}
+   * less than the number of hooks, then 1 hook may effectively block others from
+   * executing.
+   *
+   * <h4>Hook Exceptions are non-fatal</h4>
+   *
+   * Any exception thrown from this hook method will be logged and ignored.  Therefore,
+   * an exception in 1 hook will not affect another hook (when no shared resources are
+   * involved).
+   *
+   * <h4>Hook Execution may end abruptly at Impala shutdown</h4>
+   *
+   * If a hook is still executing when Impala is shutdown, there are no guarantees
+   * that it will complete execution before being killed.
+   *
+   *
+   * @param context object containing the post execution context
+   *                of the query
+   */
+  void onQueryComplete(QueryCompleteContext context);
+}
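
As the Javadoc above notes, onImpalaStartup() blocks daemon startup, and a
slow onQueryComplete() can block other hooks sharing the executor. Purely as
an illustration (the class com.example.NonBlockingHook is a hypothetical
name, not part of this change), a hook might offload heavy work like so:

    package com.example;
    
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.impala.hooks.QueryCompleteContext;
    import org.apache.impala.hooks.QueryEventHook;
    
    public class NonBlockingHook implements QueryEventHook {
      private ExecutorService worker;
    
      @Override
      public void onImpalaStartup() {
        // This call blocks impalad startup, so keep it minimal.
        worker = Executors.newSingleThreadExecutor();
      }
    
      @Override
      public void onQueryComplete(QueryCompleteContext context) {
        // Return quickly; do the real (possibly slow) publishing on
        // this hook's own thread rather than the shared hook executor.
        worker.submit(() -> publish(context.getLineageGraph()));
      }
    
      private void publish(String lineageJson) {
        // e.g. forward the lineage JSON to an external system
      }
    }
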
diff --git a/fe/src/main/java/org/apache/impala/hooks/QueryEventHookManager.java b/fe/src/main/java/org/apache/impala/hooks/QueryEventHookManager.java
new file mode 100644
index 0000000..67ba808
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/hooks/QueryEventHookManager.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.hooks;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.thrift.TBackendGflags;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.stream.Collectors;
+
+/**
+ * {@link QueryEventHookManager} manages the registration and execution of
+ * {@link QueryEventHook}s. Each manager instance may manage its own hooks,
+ * though the expected use-case is to have 1 instance per process, usually
+ * owned by the frontend. This class is not thread-safe.
+ *
+ * <h3>Hook Registration</h3>
+ *
+ * The hook implementation(s) to use at runtime are specified through the
+ * backend config flag {@link TBackendGflags#query_event_hook_classes}
+ * at Impala startup. See {@link #createFromConfig(BackendConfig)}.
+ *
+ * <h3>Hook Classloading</h3>
+ *
+ * Each hook implementation is loaded using `this` manager's classloader; no
+ * classloader isolation is performed.  Individual hook implementations should
+ * take care to properly handle any dependencies they bring in to avoid shadowing
+ * existing dependencies on the Impala classpath.
+ *
+ * <h3>Hook Execution</h3>
+ *
+ * Hook initialization ({@link QueryEventHook#onImpalaStartup()}) is
+ * performed synchronously during {@link #createFromConfig(BackendConfig)}.
+ * <p>
+ * {@link QueryEventHook#onQueryComplete(QueryCompleteContext)} is performed
+ * asynchronously during {@link #executeQueryCompleteHooks(QueryCompleteContext)}.
+ * This execution is performed by a thread-pool executor, whose size is set
+ * by config at startup.  This means that hooks may also execute concurrently.
+ * </p>
+ *
+ */
+public class QueryEventHookManager {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(QueryEventHookManager.class);
+
+  // TODO: figure out a way to source these from the defn so
+  //       we don't have to manually sync when they change
+  private static final String BE_HOOKS_FLAG = "query_event_hook_classes";
+  private static final String BE_HOOKS_THREADS_FLAG = "query_event_hook_nthreads";
+
+  private final List<QueryEventHook> hooks_;
+  private final ExecutorService hookExecutor_;
+
+  /**
+   * Static factory method to create a manager instance.  This will register
+   * all {@link QueryEventHook}s specified by the backend config flag
+   * {@code query_event_hook_classes} and then invoke their
+   * {@link QueryEventHook#onImpalaStartup()} methods synchronously.
+   *
+   * @throws IllegalArgumentException if config is invalid
+   * @throws InternalException if any hook could not be instantiated
+   * @throws InternalException if any hook.onImpalaStartup() throws an exception
+   */
+  public static QueryEventHookManager createFromConfig(BackendConfig config)
+      throws InternalException {
+
+    final int nHookThreads = config.getNumQueryExecHookThreads();
+    final String queryExecHookClasses = config.getQueryExecHookClasses();
+    LOG.info("QueryEventHook config:");
+    LOG.info("- {}={}", BE_HOOKS_THREADS_FLAG, nHookThreads);
+    LOG.info("- {}={}", BE_HOOKS_FLAG, queryExecHookClasses);
+
+    final String[] hookClasses;
+    if (StringUtils.isNotEmpty(queryExecHookClasses)) {
+      hookClasses = queryExecHookClasses.split("\\s*,\\s*");
+    } else {
+      hookClasses = new String[0];
+    }
+
+    return new QueryEventHookManager(nHookThreads, hookClasses);
+  }
+
+  /**
+   * Instantiates a manager with a fixed-size thread-pool executor for
+   * executing {@link QueryEventHook#onQueryComplete(QueryCompleteContext)}.
+   *
+   * @param nHookExecutorThreads
+   * @param hookClasses
+   *
+   * @throws IllegalArgumentException if {@code nHookExecutorThreads <= 0}
+   * @throws InternalException if any hookClass cannot be instantiated
+   * @throws InternalException if any hookClass.onImpalaStartup throws an exception
+   */
+  private QueryEventHookManager(int nHookExecutorThreads, String[] hookClasses)
+      throws InternalException {
+
+    this.hookExecutor_ = Executors.newFixedThreadPool(nHookExecutorThreads);
+    Runtime.getRuntime().addShutdownHook(new Thread(() -> this.cleanUp()));
+
+    final List<QueryEventHook> hooks = new ArrayList<>(hookClasses.length);
+    this.hooks_ = Collections.unmodifiableList(hooks);
+
+    for (String postExecHook : hookClasses) {
+      final QueryEventHook hook;
+      try {
+        final Class<QueryEventHook> clsHook =
+            (Class<QueryEventHook>) Class.forName(postExecHook);
+        hook = clsHook.newInstance();
+      } catch (InstantiationException
+          | IllegalAccessException
+          | ClassNotFoundException e) {
+        final String msg = String.format(
+            "Unable to instantiate query event hook class %s. Please check %s config",
+            postExecHook, BE_HOOKS_FLAG);
+        LOG.error(msg, e);
+        throw new InternalException(msg, e);
+      }
+
+      hooks.add(hook);
+    }
+
+    for (QueryEventHook hook : hooks) {
+      try {
+        LOG.debug("Initiating hook.onImpalaStartup for {}", hook.getClass().getName());
+        hook.onImpalaStartup();
+      }
+      catch (Exception e) {
+        final String msg = String.format(
+            "Exception during onImpalaStartup from QueryEventHook %s instance=%s",
+            hook.getClass(), hook);
+        LOG.error(msg, e);
+        throw new InternalException(msg, e);
+      }
+    }
+  }
+
+  private void cleanUp() {
+    if (!hookExecutor_.isShutdown()) {
+      hookExecutor_.shutdown();
+    }
+    // TODO (IMPALA-8571): we may want to await termination (up to a timeout)
+    // to ensure that hooks have a chance to complete execution.  Executor
+    // threads will typically run to completion after executor shutdown, but
+    // there are some instances where this doesn't hold, e.g.
+    //
+    // - executor thread is sleeping when shutdown is called
+    // - system.exit called
+  }
+
+  /**
+   * Returns an unmodifiable view of all the {@link QueryEventHook}s
+   * registered at construction.
+   *
+   * @return unmodifiable view of all currently-registered hooks
+   */
+  public List<QueryEventHook> getHooks() {
+    return hooks_;
+  }
+
+  /**
+   * Hook method to be called after query execution.  This implementation
+   * will execute all currently-registered {@link QueryEventHook}s
+   * asynchronously, returning immediately with a List of {@link Future}s
+   * representing each hook's {@link QueryEventHook#onQueryComplete(QueryCompleteContext)}
+   * invocation.
+   *
+   * <h3>Futures</h3>
+   *
+   * This method will return a list of {@link Future}s representing the future results
+   * of each hook's invocation.  The {@link Future#get()} method will return the
+   * hook instance whose invocation it represents.  The list of futures are in the
+   * same order as the order in which each hook's job was submitted.
+   *
+   * <h3>Error-Handling</h3>
+   *
+   * Exceptions thrown from {@link QueryEventHook#onQueryComplete(QueryCompleteContext)}
+   * will be logged and then rethrown on the executor thread(s), meaning that they
+   * will not halt execution.  Rather, they will be encapsulated in the returned
+   * {@link Future}s, meaning that the caller may choose to check or ignore them
+   * at some later time.
+   *
+   * @param context
+   */
+  public List<Future<QueryEventHook>> executeQueryCompleteHooks(
+      QueryCompleteContext context) {
+    LOG.debug("Query complete hook invoked with: {}", context);
+    return hooks_.stream().map(hook -> {
+      LOG.debug("Initiating onQueryComplete: {}", hook.getClass().getName());
+      return hookExecutor_.submit(() -> {
+        try {
+          hook.onQueryComplete(context);
+        } catch (Throwable t) {
+          final String msg = String.format("Exception thrown by QueryEventHook %s"+
+              ".onQueryComplete method.  Hook instance %s. This exception is "+
+              "currently being ignored by Impala, "+
+              "but may cause subsequent problems in that hook's execution",
+              hook.getClass().getName(), hook);
+          LOG.error(msg, t);
+          throw t;
+        }
+        return hook;
+      });
+    }).collect(Collectors.toList());
+  }
+}
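
To illustrate the Future semantics described in the Javadoc above (this is a
sketch, not code from this change): Future#get() returns the hook whose
invocation it represents, and a Throwable thrown from onQueryComplete()
surfaces as an ExecutionException.

    package com.example;
    
    import java.util.List;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import org.apache.impala.hooks.QueryCompleteContext;
    import org.apache.impala.hooks.QueryEventHook;
    import org.apache.impala.hooks.QueryEventHookManager;
    
    public class HookFutureExample {
      // mgr would come from QueryEventHookManager.createFromConfig(...)
      static void checkHooks(QueryEventHookManager mgr, QueryCompleteContext ctx) {
        final List<Future<QueryEventHook>> futures =
            mgr.executeQueryCompleteHooks(ctx);
        for (Future<QueryEventHook> f : futures) {
          try {
            // get() returns the hook whose invocation the future represents.
            final QueryEventHook hook = f.get(2, TimeUnit.SECONDS);
            System.out.println("hook completed: " + hook.getClass().getName());
          } catch (ExecutionException e) {
            // The hook's exception was logged by the manager and rethrown
            // on the executor thread; it surfaces here, wrapped.
          } catch (InterruptedException | TimeoutException e) {
            // The hook is still running, or the wait was interrupted.
          }
        }
      }
    }
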
diff --git a/fe/src/main/java/org/apache/impala/service/BackendConfig.java b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
index 97e0b64..44bf0a9 100644
--- a/fe/src/main/java/org/apache/impala/service/BackendConfig.java
+++ b/fe/src/main/java/org/apache/impala/service/BackendConfig.java
@@ -161,6 +161,14 @@ public class BackendConfig {
     return backendCfg_.getAuthorization_provider();
   }
 
+  public String getQueryExecHookClasses() {
+    return backendCfg_.getQuery_event_hook_classes();
+  }
+
+  public int getNumQueryExecHookThreads() {
+    return backendCfg_.getQuery_event_hook_nthreads();
+  }
+
   // Inits the auth_to_local configuration in the static KerberosName class.
   private static void initAuthToLocal() {
     // If auth_to_local is enabled, we read the configuration hadoop.security.auth_to_local
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 267458b..0c1ff46 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -26,6 +26,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
@@ -92,6 +93,9 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.NotImplementedException;
 import org.apache.impala.compat.MetastoreShim;
+import org.apache.impala.hooks.QueryCompleteContext;
+import org.apache.impala.hooks.QueryEventHook;
+import org.apache.impala.hooks.QueryEventHookManager;
 import org.apache.impala.planner.HdfsScanNode;
 import org.apache.impala.planner.PlanFragment;
 import org.apache.impala.planner.Planner;
@@ -156,7 +160,7 @@ import com.google.common.util.concurrent.Uninterruptibles;
  * Frontend API for the impalad process.
  * This class allows the impala daemon to create TQueryExecRequest
  * in response to TClientRequests. Also handles management of the authorization
- * policy.
+ * policy and query execution hooks.
  */
 public class Frontend {
   private final static Logger LOG = LoggerFactory.getLogger(Frontend.class);
@@ -232,6 +236,8 @@ public class Frontend {
 
   private final ImpaladTableUsageTracker impaladTableUsageTracker_;
 
+  private final QueryEventHookManager queryHookManager_;
+
   public Frontend(AuthorizationFactory authzFactory) throws ImpalaException {
     this(authzFactory, FeCatalogManager.createFromBackendConfig());
   }
@@ -263,6 +269,7 @@ public class Frontend {
         authzChecker_::get);
     impaladTableUsageTracker_ = ImpaladTableUsageTracker.createFromConfig(
         BackendConfig.INSTANCE);
+    queryHookManager_ = QueryEventHookManager.createFromConfig(BackendConfig.INSTANCE);
   }
 
   public FeCatalog getCatalog() { return catalogManager_.getOrCreateCatalog(); }
@@ -1511,4 +1518,66 @@ public class Frontend {
           "Unsupported table class: " + table.getClass());
     }
   }
+
+  /**
+   * Executes the {@link QueryEventHook#onQueryComplete(QueryCompleteContext)}
+   * execution hooks for each hook registered in this instance's
+   * {@link QueryEventHookManager}.
+   *
+   * <h3>Service Guarantees</h3>
+   *
+   * Impala makes the following guarantees about how this method executes hooks:
+   *
+   * <h4>Hooks are executed asynchronously</h4>
+   *
+   * All hook execution happens asynchronously of the query that triggered
+   * them.  Hooks may still be executing after the query response has returned
+   * to the caller.  Additionally, hooks may execute concurrently if the
+   * hook executor thread size is configured appropriately.
+   *
+   * <h4>Hook Invocation is in Configuration Order</h4>
+   *
+   * The <i>submission</i> of the hook execution tasks occurs in the order
+   * that the hooks were defined in configuration.  This generally means that
+   * hooks will <i>start</i> executing in order, but there are no guarantees
+   * about finishing order.
+   * <p>
+   * For example, if configured with {@code query_event_hook_classes=hook1,hook2,hook3},
+   * then hook1 will start before hook2, and hook2 will start before hook3.
+   * If you need to guarantee that hook1 <i>completes</i> before hook2 starts, then
+   * you should specify {@code query_event_hook_nthreads=1} for serial hook
+   * execution.
+   * </p>
+   *
+   * <h4>Hook Execution Blocks</h4>
+   *
+   * A hook will block the thread it executes on until it completes.  If a hook hangs,
+   * then the thread also hangs.  Impala (currently) will not check for hanging hooks to
+   * take any action.  This means that if you have {@code query_event_hook_nthreads}
+   * less than the number of hooks, then 1 hook may effectively block others from
+   * executing.
+   *
+   * <h4>Hook Exceptions are non-fatal</h4>
+   *
+   * Any exception thrown from this hook method will be logged and ignored.  Therefore,
+   * an exception in 1 hook will not affect another hook (when no shared resources are
+   * involved).
+   *
+   * <h4>Hook Execution may end abruptly at Impala shutdown</h4>
+   *
+   * If a hook is still executing when Impala is shutdown, there are no guarantees
+   * that it will complete execution before being killed.
+   *
+   * @see QueryCompleteContext
+   * @see QueryEventHookManager
+   *
+   * @param context the execution context of the query
+   */
+  public void callQueryCompleteHooks(QueryCompleteContext context) {
+    // TODO (IMPALA-8571): can we make use of the futures to implement better
+    // error-handling?  Currently, the queryHookManager simply
+    // logs-then-rethrows any exception thrown from a hook.postQueryExecute
+    final List<Future<QueryEventHook>> futures
+        = this.queryHookManager_.executeQueryCompleteHooks(context);
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/service/JniFrontend.java b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
index a798bba..631f9bd 100644
--- a/fe/src/main/java/org/apache/impala/service/JniFrontend.java
+++ b/fe/src/main/java/org/apache/impala/service/JniFrontend.java
@@ -17,23 +17,19 @@
 
 package org.apache.impala.service;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Map;
-
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.adl.AdlFileSystem;
 import org.apache.hadoop.fs.azurebfs.AzureBlobFileSystem;
 import org.apache.hadoop.fs.azurebfs.SecureAzureBlobFileSystem;
-import org.apache.hadoop.fs.adl.AdlFileSystem;
+import org.apache.hadoop.fs.s3a.S3AFileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.security.Groups;
@@ -43,11 +39,8 @@ import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.ShellBasedUnixGroupsNetgroupMapping;
 import org.apache.impala.analysis.DescriptorTable;
 import org.apache.impala.analysis.ToSqlUtils;
-import org.apache.impala.authorization.AuthorizationConfig;
 import org.apache.impala.authorization.AuthorizationFactory;
-import org.apache.impala.authorization.AuthorizationProvider;
 import org.apache.impala.authorization.ImpalaInternalAdminUser;
-import org.apache.impala.authorization.NoopAuthorizationFactory;
 import org.apache.impala.authorization.User;
 import org.apache.impala.catalog.FeDataSource;
 import org.apache.impala.catalog.FeDb;
@@ -58,6 +51,7 @@ import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.JniUtil;
+import org.apache.impala.hooks.QueryCompleteContext;
 import org.apache.impala.service.Frontend.PlanCtx;
 import org.apache.impala.thrift.TBackendGflags;
 import org.apache.impala.thrift.TBuildTestDescriptorTableParams;
@@ -88,6 +82,7 @@ import org.apache.impala.thrift.TLoadDataReq;
 import org.apache.impala.thrift.TLoadDataResp;
 import org.apache.impala.thrift.TLogLevel;
 import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TQueryCompleteContext;
 import org.apache.impala.thrift.TQueryCtx;
 import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.thrift.TShowFilesParams;
@@ -110,9 +105,12 @@ import org.apache.thrift.protocol.TBinaryProtocol;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
+import java.io.File;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map;
 
 /**
  * JNI-callable interface onto a wrapped Frontend instance. The main point is to serialise
@@ -124,6 +122,7 @@ public class JniFrontend {
       new TBinaryProtocol.Factory();
   private final Frontend frontend_;
 
+
   /**
    * Create a new instance of the Jni Frontend.
    */
@@ -625,6 +624,20 @@ public class JniFrontend {
   }
 
   /**
+   * JNI wrapper for {@link Frontend#callQueryCompleteHooks(QueryCompleteContext)}.
+   *
+   * @param serializedRequest
+   */
+  public void callQueryCompleteHooks(byte[] serializedRequest) throws ImpalaException {
+    final TQueryCompleteContext request = new TQueryCompleteContext();
+    JniUtil.deserializeThrift(protocolFactory_, request, serializedRequest);
+
+    final QueryCompleteContext context =
+        new QueryCompleteContext(request.getLineage_string());
+    this.frontend_.callQueryCompleteHooks(context);
+  }
+
+  /**
    * Returns an error string describing configuration issue with the groups mapping
    * provider implementation.
    */
diff --git a/fe/src/test/java/org/apache/impala/hooks/QueryEventHookManagerTest.java b/fe/src/test/java/org/apache/impala/hooks/QueryEventHookManagerTest.java
new file mode 100644
index 0000000..efbb0fb
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/hooks/QueryEventHookManagerTest.java
@@ -0,0 +1,146 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.hooks;
+
+import org.apache.impala.common.InternalException;
+import org.apache.impala.service.BackendConfig;
+import org.apache.impala.testutil.AlwaysErrorQueryEventHook;
+import org.apache.impala.testutil.CountingQueryEventHook;
+import org.apache.impala.testutil.PostQueryErrorEventHook;
+import org.apache.impala.thrift.TBackendGflags;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.List;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static org.junit.Assert.assertEquals;
+
+public class QueryEventHookManagerTest {
+  private TBackendGflags origFlags;
+
+  @Rule
+  public ExpectedException expectedException = ExpectedException.none();
+  private QueryCompleteContext mockQueryCompleteContext =
+      new QueryCompleteContext("unit-test lineage");
+
+  @Before
+  public void setUp()  {
+    // since some test cases will need to modify the (static)
+    // be flags, we need to save the original values so they
+    // can be restored and not break other tests
+    if (BackendConfig.INSTANCE == null) {
+      BackendConfig.create(new TBackendGflags());
+    }
+    origFlags = BackendConfig.INSTANCE.getBackendCfg();
+  }
+
+  @After
+  public void tearDown() {
+    BackendConfig.create(origFlags);
+  }
+
+  private static QueryEventHookManager createQueryEventHookManager(int nThreads,
+      String... hooks) throws Exception {
+    if (hooks.length == 0) {
+      BackendConfig.INSTANCE.getBackendCfg().setQuery_event_hook_classes("");
+    } else {
+      BackendConfig.INSTANCE.getBackendCfg().setQuery_event_hook_classes(
+          String.join(",", hooks));
+    }
+
+    BackendConfig.INSTANCE.getBackendCfg().setQuery_event_hook_nthreads(nThreads);
+
+    return QueryEventHookManager.createFromConfig(BackendConfig.INSTANCE);
+  }
+
+  @Test
+  public void testHookRegistration() throws Exception {
+    final QueryEventHookManager mgr = createQueryEventHookManager(1,
+        CountingQueryEventHook.class.getCanonicalName(),
+        CountingQueryEventHook.class.getCanonicalName());
+
+    final List<QueryEventHook> hooks = mgr.getHooks();
+    assertEquals(2, hooks.size());
+    hooks.forEach(h -> assertEquals(CountingQueryEventHook.class, h.getClass()));
+  }
+
+  @Test
+  public void testHookPostQueryExecuteErrorsDoNotKillExecution() throws Exception {
+    // a hook that throws an exception should not prevent a subsequent hook
+    // from executing
+    final QueryEventHookManager mgr = createQueryEventHookManager(1,
+        PostQueryErrorEventHook.class.getCanonicalName(),
+        CountingQueryEventHook.class.getCanonicalName());
+
+    // make sure error hook will execute first
+    assertEquals(mgr.getHooks().get(0).getClass(), PostQueryErrorEventHook.class);
+
+    final List<Future<QueryEventHook>> futures =
+        mgr.executeQueryCompleteHooks(mockQueryCompleteContext);
+
+    // this should not throw
+    final QueryEventHook hookImpl = futures.get(1).get(2, TimeUnit.SECONDS);
+
+    assertEquals(hookImpl.getClass(), CountingQueryEventHook.class);
+  }
+
+  @Test
+  public void testHookExceptionDuringStartupKillsStartup() throws Exception {
+    expectedException.expect(InternalException.class);
+
+    createQueryEventHookManager(1,
+        AlwaysErrorQueryEventHook.class.getCanonicalName(),
+        CountingQueryEventHook.class.getCanonicalName());
+  }
+
+  @Test
+  public void testHookPostQueryExecuteInvokedCorrectly() throws Exception {
+    final QueryEventHookManager mgr = createQueryEventHookManager(1,
+        CountingQueryEventHook.class.getCanonicalName(),
+        CountingQueryEventHook.class.getCanonicalName());
+
+    List<Future<QueryEventHook>> futures =
+        mgr.executeQueryCompleteHooks(mockQueryCompleteContext);
+
+    assertEquals(
+        futures.size(),
+        mgr.getHooks().size());
+
+    for (Future<QueryEventHook> f : futures) {
+      CountingQueryEventHook hook = (CountingQueryEventHook) f.get(2, TimeUnit.SECONDS);
+      assertEquals(1, hook.getPostQueryExecuteInvocations());
+    }
+
+    futures = mgr.executeQueryCompleteHooks(mockQueryCompleteContext);
+
+    assertEquals(
+        futures.size(),
+        mgr.getHooks().size());
+
+    for (Future<QueryEventHook> f : futures) {
+      CountingQueryEventHook hook = (CountingQueryEventHook) f.get(2, TimeUnit.SECONDS);
+      assertEquals(2, hook.getPostQueryExecuteInvocations());
+    }
+  }
+
+}
+
diff --git a/fe/src/test/java/org/apache/impala/testutil/AlwaysErrorQueryEventHook.java b/fe/src/test/java/org/apache/impala/testutil/AlwaysErrorQueryEventHook.java
new file mode 100644
index 0000000..cbdfb1d
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/testutil/AlwaysErrorQueryEventHook.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.testutil;
+
+import org.apache.impala.hooks.QueryCompleteContext;
+import org.apache.impala.hooks.QueryEventHook;
+
+public class AlwaysErrorQueryEventHook implements QueryEventHook {
+  @Override
+  public void onImpalaStartup() {
+    throw new RuntimeException("intentional error");
+  }
+
+  @Override
+  public void onQueryComplete(QueryCompleteContext context) {
+    throw new RuntimeException("intentional error");
+  }
+}
diff --git a/fe/src/test/java/org/apache/impala/testutil/CountingQueryEventHook.java b/fe/src/test/java/org/apache/impala/testutil/CountingQueryEventHook.java
new file mode 100644
index 0000000..711255a
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/testutil/CountingQueryEventHook.java
@@ -0,0 +1,52 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.testutil;
+
+import org.apache.impala.hooks.QueryCompleteContext;
+import org.apache.impala.hooks.QueryEventHook;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class CountingQueryEventHook implements QueryEventHook {
+  private final AtomicInteger startupCount = new AtomicInteger(0);
+  private final AtomicInteger postQueryCount = new AtomicInteger(0);
+
+  @Override
+  public void onImpalaStartup() {
+    startupCount.incrementAndGet();
+  }
+
+  @Override
+  public void onQueryComplete(QueryCompleteContext context) {
+    postQueryCount.incrementAndGet();
+  }
+
+  /**
+   * @return # of times onImpalaStartup has been invoked since construction
+   */
+  public int getImpalaStartupInvocations() {
+    return startupCount.get();
+  }
+
+  /**
+   * @return # of times onQueryComplete has been invoked since construction
+   */
+  public int getPostQueryExecuteInvocations() {
+    return postQueryCount.get();
+  }
+}
diff --git a/fe/src/test/java/org/apache/impala/testutil/DummyQueryEventHook.java b/fe/src/test/java/org/apache/impala/testutil/DummyQueryEventHook.java
new file mode 100644
index 0000000..72b224f
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/testutil/DummyQueryEventHook.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.testutil;
+
+import org.apache.impala.hooks.QueryCompleteContext;
+import org.apache.impala.hooks.QueryEventHook;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+
+public class DummyQueryEventHook implements QueryEventHook {
+  private static final Logger LOG = LoggerFactory.getLogger(DummyQueryEventHook.class);
+
+  @Override
+  public void onImpalaStartup() {
+    LOG.info("{}.onImpalaStartup", this.getClass().getName());
+    try {
+      Files.write(Paths.get("/tmp/" + this.getClass().getName() + ".onImpalaStartup"),
+          "onImpalaStartup invoked".getBytes());
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Override
+  public void onQueryComplete(QueryCompleteContext context) {
+    LOG.info("{}.onQueryComplete", this.getClass().getName());
+    try {
+      Files.write(Paths.get("/tmp/" + this.getClass().getName() + ".onQueryComplete"),
+          "onQueryComplete invoked".getBytes());
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
diff --git a/fe/src/test/java/org/apache/impala/testutil/PostQueryErrorEventHook.java b/fe/src/test/java/org/apache/impala/testutil/PostQueryErrorEventHook.java
new file mode 100644
index 0000000..6712956
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/testutil/PostQueryErrorEventHook.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.impala.testutil;
+
+import org.apache.impala.hooks.QueryCompleteContext;
+import org.apache.impala.hooks.QueryEventHook;
+
+public class PostQueryErrorEventHook implements QueryEventHook {
+  @Override
+  public void onImpalaStartup() {
+  }
+
+  @Override
+  public void onQueryComplete(QueryCompleteContext context) {
+    throw new RuntimeException("intentional error");
+  }
+}
diff --git a/tests/authorization/test_provider.py b/tests/authorization/test_provider.py
index 9c7ad2d..4d0e671 100644
--- a/tests/authorization/test_provider.py
+++ b/tests/authorization/test_provider.py
@@ -65,7 +65,7 @@ class TestAuthorizationProvider(CustomClusterTestSuite):
     try:
       super(TestAuthorizationProvider, self).setup_method(method)
     except Exception:
-      pass
+      self._stop_impala_cluster()
 
   def teardown_method(self, method):
     # Explicitly override CustomClusterTestSuite.teardown_method() to
@@ -74,4 +74,4 @@ class TestAuthorizationProvider(CustomClusterTestSuite):
     try:
       super(TestAuthorizationProvider, self).teardown_method(method)
     except Exception:
-      pass
+      self._stop_impala_cluster()
diff --git a/tests/custom_cluster/test_query_event_hooks.py b/tests/custom_cluster/test_query_event_hooks.py
new file mode 100644
index 0000000..56ad5e4
--- /dev/null
+++ b/tests/custom_cluster/test_query_event_hooks.py
@@ -0,0 +1,202 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+# Client tests for Query Event Hooks
+
+import os
+import time
+import pytest
+import tempfile
+import logging
+
+from getpass import getuser
+from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.file_utils import assert_file_in_dir_contains
+
+LOG = logging.getLogger(__name__)
+
+
+class TestHooks(CustomClusterTestSuite):
+  """
+  Tests for FE QueryEventHook invocations.
+
+  All test cases in this test suite share an impala log dir, so keep that in mind
+  if parsing any logs during your test assertions.
+  """
+  DUMMY_HOOK = "org.apache.impala.testutil.DummyQueryEventHook"
+  HOOK_POSTEXEC_FILE = "/tmp/{0}.onQueryComplete".format(DUMMY_HOOK)
+  HOOK_START_FILE = "/tmp/{0}.onImpalaStartup".format(DUMMY_HOOK)
+  MINIDUMP_PATH = tempfile.mkdtemp()
+  IMPALA_LOG_DIR = tempfile.mkdtemp(prefix="test_hooks_", dir=os.getenv("LOG_DIR"))
+
+  def teardown(self):
+    try:
+      os.remove(TestHooks.HOOK_START_FILE)
+    except OSError:
+      pass
+
+  def setup_method(self, method):
+    # Explicitly override CustomClusterTestSuite.setup_method() to
+    # clean up the dummy hook's onQueryComplete file
+    try:
+      os.remove(TestHooks.HOOK_START_FILE)
+      os.remove(TestHooks.HOOK_POSTEXEC_FILE)
+    except OSError:
+      pass
+    super(TestHooks, self).setup_method(method)
+
+  def teardown_method(self, method):
+    # Explicitly override CustomClusterTestSuite.teardown_method() to
+    # clean up the dummy hook's onQueryComplete file
+    super(TestHooks, self).teardown_method(method)
+    try:
+      os.remove(TestHooks.HOOK_START_FILE)
+      os.remove(TestHooks.HOOK_POSTEXEC_FILE)
+    except OSError:
+      pass
+
+  @pytest.mark.xfail(run=False, reason="IMPALA-8589")
+  @pytest.mark.execute_serially
+  @CustomClusterTestSuite.with_args(
+      impala_log_dir=IMPALA_LOG_DIR,
+      impalad_args="--query_event_hook_classes={0} "
+                   "--minidump_path={1}"
+                   .format(DUMMY_HOOK, MINIDUMP_PATH),
+      catalogd_args="--minidump_path={0}".format(MINIDUMP_PATH))
+  def test_query_event_hooks_execute(self, unique_database):
+    """
+    Tests that the post query execution hook actually executes by using a
+    dummy hook implementation that writes a file and testing for existence
+    of that file.
+
+    This test will perform queries needed to induce a hook event but should
+    clean up the db before completion.
+    """
+    user = getuser()
+    impala_client = self.create_impala_client()
+
+    # create closure over variables so we don't have to repeat them
+    def query(sql):
+      return impala_client.execute(sql, user=user)
+
+    # hook should be invoked at daemon startup
+    assert self.__wait_for_file(TestHooks.HOOK_START_FILE, timeout_s=10)
+
+    query("create table {0}.recipes (recipe_id int, recipe_name string)"
+          .format(unique_database))
+    query("insert into {0}.recipes (recipe_id, recipe_name) values "
+          "(1,'Tacos'), (2,'Tomato Soup'), (3,'Grilled Cheese')".format(unique_database))
+
+    # TODO (IMPALA-8572): need to end the session to trigger
+    # query unregister and therefore hook invocation. Can possibly be removed
+    # after IMPALA-8572 completes.
+    impala_client.close()
+
+    # dummy hook should create a file
+    assert self.__wait_for_file(TestHooks.HOOK_POSTEXEC_FILE, timeout_s=10)
+
+  def __wait_for_file(self, filepath, timeout_s=10):
+    if timeout_s < 0: return False
+    LOG.info("Waiting {0} s for file {1}".format(timeout_s, filepath))
+    for i in range(0, timeout_s):
+      LOG.info("Poll #{0} for file {1}".format(i, filepath))
+      if os.path.isfile(filepath):
+        LOG.info("Found file {0}".format(filepath))
+        return True
+      else:
+        time.sleep(1)
+    LOG.info("File {0} not found".format(filepath))
+    return False
+
+
+class TestHooksStartupFail(CustomClusterTestSuite):
+  """
+  Tests for failed startup due to bad QueryEventHook startup or registration
+
+  All test cases in this test suite
+  are expected to fail cluster startup and will swallow exceptions thrown during
+  setup_method().
+  """
+
+  FAILING_HOOK = "org.apache.impala.testutil.AlwaysErrorQueryEventHook"
+  NONEXIST_HOOK = "captain.hook"
+  LOG_DIR1 = tempfile.mkdtemp(prefix="test_hooks_startup_fail_", dir=os.getenv("LOG_DIR"))
+  LOG_DIR2 = tempfile.mkdtemp(prefix="test_hooks_startup_fail_", dir=os.getenv("LOG_DIR"))
+  MINIDUMP_PATH = tempfile.mkdtemp()
+
+  @pytest.mark.xfail(run=False, reason="IMPALA-8589")
+  @pytest.mark.execute_serially
+  @CustomClusterTestSuite.with_args(
+      impala_log_dir=LOG_DIR1,
+      impalad_args="--query_event_hook_classes={0} "
+                   "--minidump_path={1}"
+                   .format(FAILING_HOOK, MINIDUMP_PATH),
+      catalogd_args="--minidump_path={0}".format(MINIDUMP_PATH))
+  def test_hook_startup_fail(self):
+    """
+    Tests that an exception during QueryEventHook.onImpalaStartup will prevent
+    successful daemon startup.
+
+    This is done by parsing the impala log file for a specific message,
+    so it is somewhat brittle and possibly prone to flakiness, depending
+    on how the log file is flushed.
+    """
+    # parse log file for expected exception
+    assert_file_in_dir_contains(TestHooksStartupFail.LOG_DIR1,
+                                "Exception during onImpalaStartup from "
+                                "QueryEventHook class {0}"
+                                .format(TestHooksStartupFail.FAILING_HOOK))
+
+  @pytest.mark.xfail(run=False, reason="IMPALA-8589")
+  @pytest.mark.execute_serially
+  @CustomClusterTestSuite.with_args(
+      impala_log_dir=LOG_DIR2,
+      impalad_args="--query_event_hook_classes={0} "
+                   "--minidump_path={1}"
+                   .format(NONEXIST_HOOK, MINIDUMP_PATH),
+      catalogd_args="--minidump_path={0}".format(MINIDUMP_PATH))
+  def test_hook_instantiation_fail(self):
+    """
+    Tests that failure to instantiate a QueryEventHook will prevent
+    successful daemon startup.
+
+    This is done by parsing the impala log file for a specific message,
+    so it is somewhat brittle and possibly prone to flakiness, depending
+    on how the log file is flushed.
+    """
+    # parse log file for expected exception
+    assert_file_in_dir_contains(TestHooksStartupFail.LOG_DIR2,
+                                "Unable to instantiate query event hook class {0}"
+                                .format(TestHooksStartupFail.NONEXIST_HOOK))
+
+  def setup_method(self, method):
+    # Explicitly override CustomClusterTestSuite.setup_method() to
+    # allow it to exception, since this test suite is for cases where
+    # startup fails
+    try:
+      super(TestHooksStartupFail, self).setup_method(method)
+    except Exception:
+      self._stop_impala_cluster()
+
+  def teardown_method(self, method):
+    # Explicitly override CustomClusterTestSuite.teardown_method() to
+    # allow it to exception, since it relies on setup_method() having
+    # completed successfully
+    try:
+      super(TestHooksStartupFail, self).teardown_method(method)
+    except Exception:
+      self._stop_impala_cluster()


[impala] 01/08: IMPALA-8400: Implement Ranger audit event handler

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 2750f0ab358ce754fb651a019fc06a071395395f
Author: Fredy Wijaya <fw...@cloudera.com>
AuthorDate: Fri May 10 14:46:27 2019 -0500

    IMPALA-8400: Implement Ranger audit event handler
    
    This patch implements Ranger audit event handler to behave similarly to
    the Hive/Ranger audit event handler, most notably:
    - Buffer the audit events during authorization and only flush them once
      the authorization is complete.
    - The audit will only add the event for the first failure.
    - Create an audit event handler per statement.
    
    Testing:
    - Added test cases in RangerAuditLogTest
    - Ran FE tests
    - Ran all E2E authorization tests
    
    Change-Id: Ife93c3e708875ef5fc0117153ad8ee225a88518b
    Reviewed-on: http://gerrit.cloudera.org:8080/13309
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../apache/impala/analysis/AnalysisContext.java    |   9 +-
 .../impala/authorization/AuthorizationChecker.java |  16 +-
 .../impala/authorization/AuthorizationContext.java |  30 +
 .../authorization/BaseAuthorizationChecker.java    |  64 +-
 .../authorization/NoopAuthorizationFactory.java    |   9 +-
 .../ranger/RangerAuthorizationChecker.java         | 101 ++-
 .../ranger/RangerAuthorizationContext.java         |  37 ++
 .../ranger/RangerBufferAuditHandler.java           | 117 ++++
 .../ranger/RangerCatalogdAuthorizationManager.java |  12 +-
 .../ranger/RangerImpaladAuthorizationManager.java  |   4 +-
 .../sentry/SentryAuthorizationChecker.java         |   9 +-
 .../authorization/AuthorizationStmtTest.java       | 724 +-------------------
 .../impala/authorization/AuthorizationTest.java    |   2 -
 .../authorization/AuthorizationTestBase.java       | 727 +++++++++++++++++++++
 .../authorization/ranger/RangerAuditLogTest.java   | 196 ++++++
 .../org/apache/impala/common/FrontendTestBase.java |  10 +-
 16 files changed, 1299 insertions(+), 768 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
index 90eee80..cdd456c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AnalysisContext.java
@@ -25,6 +25,7 @@ import java.util.Set;
 
 import org.apache.impala.analysis.StmtMetadataLoader.StmtTableCache;
 import org.apache.impala.authorization.AuthorizationChecker;
+import org.apache.impala.authorization.AuthorizationContext;
 import org.apache.impala.authorization.AuthorizationFactory;
 import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.authorization.AuthorizationException;
@@ -417,10 +418,16 @@ public class AnalysisContext {
     // Authorize statement and record exception. Authorization relies on information
     // collected during analysis.
     AuthorizationException authException = null;
+    AuthorizationContext authzCtx = null;
     try {
-      authzChecker.authorize(analysisResult_, catalog_);
+      authzCtx = authzChecker.createAuthorizationContext(true);
+      authzChecker.authorize(authzCtx, analysisResult_, catalog_);
     } catch (AuthorizationException e) {
       authException = e;
+    } finally {
+      if (authzCtx != null) {
+        authzChecker.postAuthorize(authzCtx);
+      }
     }
 
     // AuthorizationExceptions take precedence over AnalysisExceptions so as not
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
index 5d4b1be..454cad3 100644
--- a/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizationChecker.java
@@ -35,12 +35,26 @@ public interface AuthorizationChecker {
   boolean hasAccess(User user, PrivilegeRequest request) throws InternalException;
 
   /**
+   * Creates a new {@link AuthorizationContext}. An {@link AuthorizationContext} gets
+   * created per authorization execution.
+   *
+   * @param doAudits whether to collect and log audit events for this check
+   */
+  AuthorizationContext createAuthorizationContext(boolean doAudits);
+
+  /**
    * Authorize an analyzed statement.
    *
    * @throws AuthorizationException thrown if the user doesn't have sufficient privileges
    *                                to run this statement.
    */
-  void authorize(AnalysisResult analysisResult, FeCatalog catalog)
+  void authorize(AuthorizationContext authzCtx, AnalysisResult analysisResult,
+      FeCatalog catalog) throws AuthorizationException, InternalException;
+
+  /**
+   * This method is to be executed after an authorization check has occurred.
+   */
+  void postAuthorize(AuthorizationContext authzCtx)
       throws AuthorizationException, InternalException;
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/authorization/AuthorizationContext.java b/fe/src/main/java/org/apache/impala/authorization/AuthorizationContext.java
new file mode 100644
index 0000000..3ed59ce
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/AuthorizationContext.java
@@ -0,0 +1,30 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization;
+
+/**
+ * An authorization context class that is created per authorization check.
+ */
+public class AuthorizationContext {
+  private final long startTime_ = System.currentTimeMillis();
+
+  /**
+   * Gets the time at which the authorization check started.
+   */
+  public long getStartTime() { return startTime_; }
+}
diff --git a/fe/src/main/java/org/apache/impala/authorization/BaseAuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/BaseAuthorizationChecker.java
index b351307..02cb9cb 100644
--- a/fe/src/main/java/org/apache/impala/authorization/BaseAuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/BaseAuthorizationChecker.java
@@ -58,8 +58,16 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
    * request, false otherwise. Always returns true if authorization is disabled or the
    * given user is an admin user.
    */
-  public boolean hasAccess(User user, PrivilegeRequest request)
-      throws InternalException {
+  @Override
+  public boolean hasAccess(User user, PrivilegeRequest request) throws InternalException {
+    // We don't want to do an audit log here. This method is used by "show databases",
+    // "show tables", "describe" to filter out unauthorized database, table, or column
+    // names.
+    return hasAccess(createAuthorizationContext(false), user, request);
+  }
+
+  private boolean hasAccess(AuthorizationContext authzCtx, User user,
+      PrivilegeRequest request) throws InternalException {
     Preconditions.checkNotNull(user);
     Preconditions.checkNotNull(request);
 
@@ -68,7 +76,17 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
     if (!config_.isEnabled() || user instanceof ImpalaInternalAdminUser) {
       return true;
     }
-    return authorize(user, request);
+    return authorizeResource(authzCtx, user, request);
+  }
+
+  /**
+   * Executes code after the authorization check.
+   * Override this method to add a custom post-authorization check.
+   */
+  @Override
+  public void postAuthorize(AuthorizationContext authzCtx) {
+    long durationMs = System.currentTimeMillis() - authzCtx.getStartTime();
+    LOG.debug("Authorization check took {} ms", durationMs);
   }
 
   /**
@@ -76,8 +94,9 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
    * analyze() must have already been called. Throws an AuthorizationException if the
    * user doesn't have sufficient privileges to run this statement.
    */
-  public void authorize(AnalysisResult analysisResult, FeCatalog catalog)
-      throws AuthorizationException, InternalException {
+  @Override
+  public void authorize(AuthorizationContext authzCtx, AnalysisResult analysisResult,
+      FeCatalog catalog) throws AuthorizationException, InternalException {
     Preconditions.checkNotNull(analysisResult);
     Analyzer analyzer = analysisResult.getAnalyzer();
     // Authorize statements that may produce several hierarchical privilege requests.
@@ -114,20 +133,20 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
 
       // Check any non-table, non-column privilege requests first.
       for (PrivilegeRequest request : otherPrivReqs) {
-        authorizePrivilegeRequest(analysisResult, catalog, request);
+        authorizePrivilegeRequest(authzCtx, analysisResult, catalog, request);
       }
 
       // Authorize table accesses, one table at a time, by considering both table and
       // column-level privilege requests.
       for (Map.Entry<String, List<PrivilegeRequest>> entry : tablePrivReqs.entrySet()) {
-        authorizeTableAccess(analysisResult, catalog, entry.getValue());
+        authorizeTableAccess(authzCtx, analysisResult, catalog, entry.getValue());
       }
     } else {
       for (PrivilegeRequest privReq : analyzer.getPrivilegeReqs()) {
         Preconditions.checkState(
             !(privReq.getAuthorizable().getType() == Authorizable.Type.COLUMN) ||
                 analysisResult.isSingleColumnPrivStmt());
-        authorizePrivilegeRequest(analysisResult, catalog, privReq);
+        authorizePrivilegeRequest(authzCtx, analysisResult, catalog, privReq);
       }
     }
 
@@ -138,7 +157,7 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
     // 'user_has_profile_access' flag in queryCtx_.
     for (Pair<PrivilegeRequest, String> maskedReq : analyzer.getMaskedPrivilegeReqs()) {
       try {
-        authorizePrivilegeRequest(analysisResult, catalog, maskedReq.first);
+        authorizePrivilegeRequest(authzCtx, analysisResult, catalog, maskedReq.first);
       } catch (AuthorizationException e) {
         analysisResult.setUserHasProfileAccess(false);
         if (!Strings.isNullOrEmpty(maskedReq.second)) {
@@ -154,8 +173,9 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
    * Throws an AuthorizationException if the user doesn't have sufficient privileges for
    * this request. Also, checks if the request references a system database.
    */
-  private void authorizePrivilegeRequest(AnalysisResult analysisResult, FeCatalog catalog,
-      PrivilegeRequest request) throws AuthorizationException, InternalException {
+  private void authorizePrivilegeRequest(AuthorizationContext authzCtx,
+      AnalysisResult analysisResult, FeCatalog catalog, PrivilegeRequest request)
+      throws AuthorizationException, InternalException {
     Preconditions.checkNotNull(request);
     String dbName = null;
     if (request.getAuthorizable() != null) {
@@ -166,7 +186,7 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
     if (dbName != null && checkSystemDbAccess(catalog, dbName, request.getPrivilege())) {
       return;
     }
-    checkAccess(analysisResult.getAnalyzer().getUser(), request);
+    checkAccess(authzCtx, analysisResult.getAnalyzer().getUser(), request);
   }
 
   /**
@@ -177,8 +197,9 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
    * privilege requests. Throws an AuthorizationException if the user doesn't have
    * sufficient privileges.
    */
-  private void authorizeTableAccess(AnalysisResult analysisResult, FeCatalog catalog,
-      List<PrivilegeRequest> requests) throws AuthorizationException, InternalException {
+  protected void authorizeTableAccess(AuthorizationContext authzCtx,
+      AnalysisResult analysisResult, FeCatalog catalog, List<PrivilegeRequest> requests)
+      throws AuthorizationException, InternalException {
     Preconditions.checkArgument(!requests.isEmpty());
     Analyzer analyzer = analysisResult.getAnalyzer();
     // We need to temporarily deny access when column masking or row filtering feature is
@@ -192,7 +213,7 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
     for (PrivilegeRequest request: requests) {
       if (request.getAuthorizable().getType() == Authorizable.Type.TABLE) {
         try {
-          authorizePrivilegeRequest(analysisResult, catalog, request);
+          authorizePrivilegeRequest(authzCtx, analysisResult, catalog, request);
         } catch (AuthorizationException e) {
           // Authorization fails if we fail to authorize any table-level request that is
           // not a SELECT privilege (e.g. INSERT).
@@ -203,7 +224,7 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
         Preconditions.checkState(
             request.getAuthorizable().getType() == Authorizable.Type.COLUMN);
         if (hasTableSelectPriv) continue;
-        if (hasAccess(analyzer.getUser(), request)) {
+        if (hasAccess(authzCtx, analyzer.getUser(), request)) {
           hasColumnSelectPriv = true;
           continue;
         }
@@ -246,11 +267,12 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
    * Authorizes the PrivilegeRequest, throwing an Authorization exception if
    * the user does not have sufficient privileges.
    */
-  private void checkAccess(User user, PrivilegeRequest privilegeRequest)
+  private void checkAccess(AuthorizationContext authzCtx, User user,
+      PrivilegeRequest privilegeRequest)
       throws AuthorizationException, InternalException {
     Preconditions.checkNotNull(privilegeRequest);
 
-    if (hasAccess(user, privilegeRequest)) return;
+    if (hasAccess(authzCtx, user, privilegeRequest)) return;
 
     Privilege privilege = privilegeRequest.getPrivilege();
     if (privilegeRequest.getAuthorizable().getType() == Type.FUNCTION) {
@@ -292,10 +314,10 @@ public abstract class BaseAuthorizationChecker implements AuthorizationChecker {
   }
 
   /**
-   * Performs an authorization for a given user.
+   * Performs an authorization for a given user and resource.
    */
-  protected abstract boolean authorize(User user, PrivilegeRequest request)
-      throws InternalException;
+  protected abstract boolean authorizeResource(AuthorizationContext authzCtx, User user,
+      PrivilegeRequest request) throws InternalException;
 
   /**
    * Returns a set of groups for a given user.
diff --git a/fe/src/main/java/org/apache/impala/authorization/NoopAuthorizationFactory.java b/fe/src/main/java/org/apache/impala/authorization/NoopAuthorizationFactory.java
index 7ac58aa..6f6a571 100644
--- a/fe/src/main/java/org/apache/impala/authorization/NoopAuthorizationFactory.java
+++ b/fe/src/main/java/org/apache/impala/authorization/NoopAuthorizationFactory.java
@@ -184,8 +184,8 @@ public class NoopAuthorizationFactory implements AuthorizationFactory {
   public AuthorizationChecker newAuthorizationChecker(AuthorizationPolicy authzPolicy) {
     return new BaseAuthorizationChecker(authzConfig_) {
       @Override
-      protected boolean authorize(User user, PrivilegeRequest request)
-          throws InternalException {
+      protected boolean authorizeResource(AuthorizationContext authzCtx, User user,
+          PrivilegeRequest request) throws InternalException {
         return true;
       }
 
@@ -202,6 +202,11 @@ public class NoopAuthorizationFactory implements AuthorizationFactory {
 
       @Override
       public void invalidateAuthorizationCache() {}
+
+      @Override
+      public AuthorizationContext createAuthorizationContext(boolean doAudits) {
+        return new AuthorizationContext();
+      }
     };
   }
 
diff --git a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationChecker.java
index a985ddf..98ba01a 100644
--- a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationChecker.java
@@ -20,18 +20,21 @@ package org.apache.impala.authorization.ranger;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.impala.analysis.AnalysisContext.AnalysisResult;
 import org.apache.impala.authorization.Authorizable;
 import org.apache.impala.authorization.Authorizable.Type;
 import org.apache.impala.authorization.AuthorizationChecker;
 import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizationContext;
 import org.apache.impala.authorization.AuthorizationException;
 import org.apache.impala.authorization.BaseAuthorizationChecker;
 import org.apache.impala.authorization.DefaultAuthorizableFactory;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
 import org.apache.impala.authorization.User;
+import org.apache.impala.catalog.FeCatalog;
 import org.apache.impala.common.InternalException;
-import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
 import org.apache.ranger.plugin.policyengine.RangerAccessRequest;
 import org.apache.ranger.plugin.policyengine.RangerAccessRequestImpl;
 import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl;
@@ -45,6 +48,7 @@ import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * An implementation of {@link AuthorizationChecker} that uses Ranger.
@@ -60,24 +64,24 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
   public static final String UPDATE_ACCESS_TYPE = "update";
   public static final String SELECT_ACCESS_TYPE = "select";
 
-  private final RangerDefaultAuditHandler auditHandler_;
   private final RangerImpalaPlugin plugin_;
 
   public RangerAuthorizationChecker(AuthorizationConfig authzConfig) {
     super(authzConfig);
     Preconditions.checkArgument(authzConfig instanceof RangerAuthorizationConfig);
     RangerAuthorizationConfig rangerConfig = (RangerAuthorizationConfig) authzConfig;
-    auditHandler_ = new RangerDefaultAuditHandler();
     plugin_ = new RangerImpalaPlugin(
         rangerConfig.getServiceType(), rangerConfig.getAppId());
     plugin_.init();
   }
 
   @Override
-  protected boolean authorize(User user, PrivilegeRequest request)
-      throws InternalException {
+  protected boolean authorizeResource(AuthorizationContext authzCtx, User user,
+      PrivilegeRequest request) throws InternalException {
+    Preconditions.checkArgument(authzCtx instanceof RangerAuthorizationContext);
     Preconditions.checkNotNull(user);
     Preconditions.checkNotNull(request);
+    RangerAuthorizationContext rangerAuthzCtx = (RangerAuthorizationContext) authzCtx;
     List<RangerAccessResourceImpl> resources = new ArrayList<>();
     Authorizable authorizable = request.getAuthorizable();
     switch (authorizable.getType()) {
@@ -144,13 +148,15 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
 
     for (RangerAccessResourceImpl resource: resources) {
       if (request.getPrivilege() == Privilege.ANY) {
-        if (!authorize(resource, user, request.getPrivilege())) {
+        if (!authorizeResource(rangerAuthzCtx, resource, user, request.getPrivilege())) {
           return false;
         }
       } else {
         boolean authorized = request.getPrivilege().hasAnyOf() ?
-            authorizeAny(resource, user, request.getPrivilege().getImpliedPrivileges()) :
-            authorizeAll(resource, user, request.getPrivilege().getImpliedPrivileges());
+            authorizeAny(rangerAuthzCtx, resource, user,
+                request.getPrivilege().getImpliedPrivileges()) :
+            authorizeAll(rangerAuthzCtx, resource, user,
+                request.getPrivilege().getImpliedPrivileges());
         if (!authorized) {
           return false;
         }
@@ -160,6 +166,15 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
   }
 
   @Override
+  public void postAuthorize(AuthorizationContext authzCtx) {
+    Preconditions.checkArgument(authzCtx instanceof RangerAuthorizationContext);
+    super.postAuthorize(authzCtx);
+    RangerBufferAuditHandler auditHandler =
+        ((RangerAuthorizationContext) authzCtx).getAuditHandler();
+    auditHandler.flush();
+  }
+
+  @Override
   protected void authorizeRowFilterAndColumnMask(User user,
       List<PrivilegeRequest> privilegeRequests)
       throws AuthorizationException, InternalException {
@@ -190,6 +205,51 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
     }
   }
 
+  @Override
+  public AuthorizationContext createAuthorizationContext(boolean doAudits) {
+    RangerAuthorizationContext authzCtx = new RangerAuthorizationContext();
+    if (doAudits) {
+      // Any statement that goes through {@link authorize} will need to have audit logs.
+      authzCtx.setAuditHandler(new RangerBufferAuditHandler());
+    }
+    return authzCtx;
+  }
+
+  @Override
+  protected void authorizeTableAccess(AuthorizationContext authzCtx,
+      AnalysisResult analysisResult, FeCatalog catalog, List<PrivilegeRequest> requests)
+      throws AuthorizationException, InternalException {
+    RangerAuthorizationContext originalCtx = (RangerAuthorizationContext) authzCtx;
+    // case 1: table (select) OK --> add the table event
+    // case 2: table (non-select) ERROR --> add the table event
+    // case 3: table (select) ERROR, columns (select) OK -> only add the column events
+    // case 4: table (select) ERROR, columns (select) ERROR --> only add the first column
+    //                                                          event
+    RangerAuthorizationContext tmpCtx = new RangerAuthorizationContext();
+    tmpCtx.setAuditHandler(new RangerBufferAuditHandler());
+    try {
+      super.authorizeTableAccess(tmpCtx, analysisResult, catalog, requests);
+    } catch (AuthorizationException e) {
+      tmpCtx.getAuditHandler().getAuthzEvents().stream()
+          .filter(evt ->
+              // case 2: get the first failing non-select table
+              (!"select".equals(evt.getAccessType()) &&
+                  "@table".equals(evt.getResourceType())) ||
+              // case 4: get the first failing column
+              ("@column".equals(evt.getResourceType()) && evt.getAccessResult() == 0))
+          .findFirst()
+          .ifPresent(evt -> originalCtx.getAuditHandler().getAuthzEvents().add(evt));
+      throw e;
+    } finally {
+      // case 1 & 4: we only add the successful events. The first table-level access
+      // check is only for the short-circuit; we don't want to add an event for that.
+      List<AuthzAuditEvent> events = tmpCtx.getAuditHandler().getAuthzEvents().stream()
+          .filter(evt -> evt.getAccessResult() != 0)
+          .collect(Collectors.toList());
+      originalCtx.getAuditHandler().getAuthzEvents().addAll(events);
+    }
+  }
+
   /**
    * This method checks if column mask is enabled on the given columns and deny access
    * when column mask is enabled by throwing an {@link AuthorizationException}. This is
@@ -204,7 +264,7 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
         .build();
     RangerAccessRequest req = new RangerAccessRequestImpl(resource,
         SELECT_ACCESS_TYPE, user.getShortName(), getUserGroups(user));
-    if (plugin_.evalDataMaskPolicies(req, auditHandler_).isMaskEnabled()) {
+    if (plugin_.evalDataMaskPolicies(req, null).isMaskEnabled()) {
       throw new AuthorizationException(String.format(
           "Impala does not support column masking yet. Column masking is enabled on " +
               "column: %s.%s.%s", dbName, tableName, columnName));
@@ -224,7 +284,7 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
         .build();
     RangerAccessRequest req = new RangerAccessRequestImpl(resource,
         SELECT_ACCESS_TYPE, user.getShortName(), getUserGroups(user));
-    if (plugin_.evalRowFilterPolicies(req, auditHandler_).isRowFilterEnabled()) {
+    if (plugin_.evalRowFilterPolicies(req, null).isRowFilterEnabled()) {
       throw new AuthorizationException(String.format(
           "Impala does not support row filtering yet. Row filtering is enabled " +
               "on table: %s.%s", dbName, tableName));
@@ -237,29 +297,31 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
     return new HashSet<>(ugi.getGroups());
   }
 
-  private boolean authorizeAny(RangerAccessResourceImpl resource, User user,
-      EnumSet<Privilege> privileges) throws InternalException {
+  private boolean authorizeAny(RangerAuthorizationContext authzCtx,
+      RangerAccessResourceImpl resource, User user, EnumSet<Privilege> privileges)
+      throws InternalException {
     for (Privilege privilege: privileges) {
-      if (authorize(resource, user, privilege)) {
+      if (authorizeResource(authzCtx, resource, user, privilege)) {
         return true;
       }
     }
     return false;
   }
 
-  private boolean authorizeAll(RangerAccessResourceImpl resource, User user,
-      EnumSet<Privilege> privileges)
+  private boolean authorizeAll(RangerAuthorizationContext authzCtx,
+      RangerAccessResourceImpl resource, User user, EnumSet<Privilege> privileges)
       throws InternalException {
     for (Privilege privilege: privileges) {
-      if (!authorize(resource, user, privilege)) {
+      if (!authorizeResource(authzCtx, resource, user, privilege)) {
         return false;
       }
     }
     return true;
   }
 
-  private boolean authorize(RangerAccessResourceImpl resource, User user,
-      Privilege privilege) throws InternalException {
+  private boolean authorizeResource(RangerAuthorizationContext authzCtx,
+      RangerAccessResourceImpl resource, User user, Privilege privilege)
+      throws InternalException {
     String accessType;
     if (privilege == Privilege.ANY) {
       accessType = RangerPolicyEngine.ANY_ACCESS;
@@ -271,7 +333,8 @@ public class RangerAuthorizationChecker extends BaseAuthorizationChecker {
     }
     RangerAccessRequest request = new RangerAccessRequestImpl(resource,
         accessType, user.getShortName(), getUserGroups(user));
-    RangerAccessResult result = plugin_.isAccessAllowed(request);
+    RangerAccessResult result = plugin_.isAccessAllowed(request,
+        authzCtx.getAuditHandler());
     return result != null && result.getIsAllowed();
   }
 
diff --git a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationContext.java b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationContext.java
new file mode 100644
index 0000000..4b69ce9
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerAuthorizationContext.java
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization.ranger;
+
+import com.google.common.base.Preconditions;
+import org.apache.impala.authorization.AuthorizationContext;
+
+import javax.annotation.Nullable;
+
+/**
+ * Ranger specific {@link AuthorizationContext}.
+ */
+public class RangerAuthorizationContext extends AuthorizationContext {
+  // Audit handler can be null meaning we don't want to do an audit log.
+  private @Nullable RangerBufferAuditHandler auditHandler_;
+
+  public void setAuditHandler(RangerBufferAuditHandler auditHandler) {
+    auditHandler_ = Preconditions.checkNotNull(auditHandler);
+  }
+
+  public RangerBufferAuditHandler getAuditHandler() { return auditHandler_; }
+}
diff --git a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerBufferAuditHandler.java b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerBufferAuditHandler.java
new file mode 100644
index 0000000..8beefa3
--- /dev/null
+++ b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerBufferAuditHandler.java
@@ -0,0 +1,117 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization.ranger;
+
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler;
+import org.apache.ranger.plugin.policyengine.RangerAccessRequest;
+import org.apache.ranger.plugin.policyengine.RangerAccessResource;
+import org.apache.ranger.plugin.policyengine.RangerAccessResult;
+import org.apache.ranger.plugin.policyengine.RangerAccessResultProcessor;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Impala implementation of {@link RangerDefaultAuditHandler}. This audit handler
+ * batches the audit events and flushes them at the end via an explicit
+ * {@link #flush()} method.
+ * Most of the implementation here was copied from Hive/Ranger plugin code.
+ *
+ * This class is scoped per statement and the instance is not meant to be used by
+ * multiple threads.
+ */
+public class RangerBufferAuditHandler implements RangerAccessResultProcessor {
+  private final RangerDefaultAuditHandler auditHandler_ = new RangerDefaultAuditHandler();
+  private final List<AuthzAuditEvent> auditEvents_ = new ArrayList<>();
+
+  public static class AutoFlush extends RangerBufferAuditHandler
+      implements AutoCloseable {
+    @Override
+    public void close() {
+      super.flush();
+    }
+  }
+
+  /**
+   * Creates an instance of {@link RangerBufferAuditHandler} that will do an auto-flush.
+   * Use it with try-with-resources.
+   */
+  public static AutoFlush autoFlush() {
+    return new AutoFlush();
+  }
+
+  @Override
+  public void processResult(RangerAccessResult result) {
+    processResults(Collections.singletonList(result));
+  }
+
+  @Override
+  public void processResults(Collection<RangerAccessResult> results) {
+    auditEvents_.addAll(createAuditEvents(results));
+  }
+
+  /**
+   * Flushes the audit events.
+   */
+  public void flush() {
+    // When there is a failure, we only want to log the first failure.
+    Optional<AuthzAuditEvent> firstFailure = auditEvents_.stream()
+        .filter(evt -> evt.getAccessResult() == 0)
+        .findFirst();
+    if (firstFailure.isPresent()) {
+      auditEvents_.clear();
+      auditEvents_.add(firstFailure.get());
+    }
+    auditEvents_.forEach(event -> auditHandler_.logAuthzAudit(event));
+  }
+
+  private AuthzAuditEvent createAuditEvent(RangerAccessResult result) {
+    RangerAccessRequest request = result.getAccessRequest();
+    RangerAccessResource resource = request.getResource();
+    String resourceType = resource != null ? resource.getLeafName() : null;
+
+    AuthzAuditEvent auditEvent = auditHandler_.getAuthzEvents(result);
+    auditEvent.setAccessType(request.getAccessType());
+    auditEvent.setResourcePath(resource != null ? resource.getAsString() : null);
+    if (resourceType != null) {
+      auditEvent.setResourceType("@" + resourceType);
+    }
+    return auditEvent;
+  }
+
+  /**
+   * Creates a list of {@link AuthzAuditEvent} for a given collection of
+   * {@link RangerAccessResult}. Non-auditable results will be ignored. If there is
+   * at least one access denied error, only that event will be returned. Multiple
+   * policies with the same policy ID will be grouped together.
+   */
+  private List<AuthzAuditEvent> createAuditEvents(
+      Collection<RangerAccessResult> results) {
+    List<AuthzAuditEvent> auditEvents = new ArrayList<>();
+    for (RangerAccessResult result : results) {
+      if (!result.getIsAudited()) continue; // ignore non-auditable result
+      auditEvents.add(createAuditEvent(result));
+    }
+    return auditEvents;
+  }
+
+  protected List<AuthzAuditEvent> getAuthzEvents() { return auditEvents_; }
+}
diff --git a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerCatalogdAuthorizationManager.java b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerCatalogdAuthorizationManager.java
index 44d48db..7bb6aaf 100644
--- a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerCatalogdAuthorizationManager.java
+++ b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerCatalogdAuthorizationManager.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.impala.authorization.AuthorizationDelta;
 import org.apache.impala.authorization.AuthorizationManager;
 import org.apache.impala.authorization.User;
+import org.apache.impala.authorization.ranger.RangerBufferAuditHandler.AutoFlush;
 import org.apache.impala.catalog.AuthzCacheInvalidation;
 import org.apache.impala.catalog.CatalogServiceCatalog;
 import org.apache.impala.common.ImpalaException;
@@ -38,7 +39,6 @@ import org.apache.impala.thrift.TShowGrantPrincipalParams;
 import org.apache.impala.thrift.TShowRolesParams;
 import org.apache.impala.thrift.TShowRolesResult;
 import org.apache.impala.util.ClassUtil;
-import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler;
 import org.apache.ranger.plugin.util.GrantRevokeRequest;
 
 import java.util.ArrayList;
@@ -59,13 +59,11 @@ import java.util.function.Supplier;
 public class RangerCatalogdAuthorizationManager implements AuthorizationManager {
   private static final String AUTHZ_CACHE_INVALIDATION_MARKER = "ranger";
 
-  private final RangerDefaultAuditHandler auditHandler_;
   private final Supplier<RangerImpalaPlugin> plugin_;
   private final CatalogServiceCatalog catalog_;
 
   public RangerCatalogdAuthorizationManager(Supplier<RangerImpalaPlugin> pluginSupplier,
       CatalogServiceCatalog catalog) {
-    auditHandler_ = new RangerDefaultAuditHandler();
     plugin_ = pluginSupplier;
     catalog_ = catalog;
   }
@@ -168,7 +166,9 @@ public class RangerCatalogdAuthorizationManager implements AuthorizationManager
   public void grantPrivilege(List<GrantRevokeRequest> requests) throws ImpalaException {
     try {
       for (GrantRevokeRequest request : requests) {
-        plugin_.get().grantAccess(request, auditHandler_);
+        try (AutoFlush auditHandler = RangerBufferAuditHandler.autoFlush()) {
+          plugin_.get().grantAccess(request, auditHandler);
+        }
       }
     } catch (Exception e) {
       throw new InternalException(e.getMessage());
@@ -179,7 +179,9 @@ public class RangerCatalogdAuthorizationManager implements AuthorizationManager
   public void revokePrivilege(List<GrantRevokeRequest> requests) throws ImpalaException {
     try {
       for (GrantRevokeRequest request : requests) {
-        plugin_.get().revokeAccess(request, auditHandler_);
+        try (AutoFlush auditHandler = RangerBufferAuditHandler.autoFlush()) {
+          plugin_.get().revokeAccess(request, auditHandler);
+        }
       }
     } catch (Exception e) {
       throw new InternalException(e.getMessage());
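
The try-with-resources blocks above lean on AutoFlush being AutoCloseable:
buffered audit events get flushed exactly once per request when the block
exits, even if grantAccess()/revokeAccess() throws. A generic sketch of the
idiom, using hypothetical names rather than the Impala classes:

    import java.util.ArrayList;
    import java.util.List;

    class BufferedAuditHandler implements AutoCloseable {
      private final List<String> events = new ArrayList<>();

      void audit(String event) { events.add(event); } // buffer, don't log yet

      @Override
      public void close() { // flushes once, even on exception paths
        events.forEach(System.out::println);
        events.clear();
      }
    }

    class Demo {
      public static void main(String[] args) {
        try (BufferedAuditHandler handler = new BufferedAuditHandler()) {
          handler.audit("grant select on table t to user u");
          // a throwing call here would still flush the buffered event
        }
      }
    }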
diff --git a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerImpaladAuthorizationManager.java b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerImpaladAuthorizationManager.java
index c84a496..a4ea9df 100644
--- a/fe/src/main/java/org/apache/impala/authorization/ranger/RangerImpaladAuthorizationManager.java
+++ b/fe/src/main/java/org/apache/impala/authorization/ranger/RangerImpaladAuthorizationManager.java
@@ -370,8 +370,8 @@ public class RangerImpaladAuthorizationManager implements AuthorizationManager {
 
   @Override
   public AuthorizationDelta refreshAuthorization(boolean resetVersions) {
-    // TODO: IMPALA-8293 (part 2)
-    return new AuthorizationDelta();
+    throw new UnsupportedOperationException(String.format(
+        "%s is not supported in Impalad", ClassUtil.getMethodName()));
   }
 
   private static class RangerResultRow {
diff --git a/fe/src/main/java/org/apache/impala/authorization/sentry/SentryAuthorizationChecker.java b/fe/src/main/java/org/apache/impala/authorization/sentry/SentryAuthorizationChecker.java
index 7b6cb76..288a3e5 100644
--- a/fe/src/main/java/org/apache/impala/authorization/sentry/SentryAuthorizationChecker.java
+++ b/fe/src/main/java/org/apache/impala/authorization/sentry/SentryAuthorizationChecker.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.impala.authorization.Authorizable.Type;
 import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizationContext;
 import org.apache.impala.authorization.AuthorizationException;
 import org.apache.impala.authorization.BaseAuthorizationChecker;
 import org.apache.impala.authorization.Privilege;
@@ -85,6 +86,11 @@ public class SentryAuthorizationChecker extends BaseAuthorizationChecker {
     // Authorization refresh in Sentry is done by updating {@link AuthorizationPolicy}.
   }
 
+  @Override
+  public AuthorizationContext createAuthorizationContext(boolean doAudits) {
+    return new AuthorizationContext();
+  }
+
   /*
    * Creates a new ResourceAuthorizationProvider based on the given configuration.
    */
@@ -94,7 +100,8 @@ public class SentryAuthorizationChecker extends BaseAuthorizationChecker {
   }
 
   @Override
-  public boolean authorize(User user, PrivilegeRequest request) throws InternalException {
+  public boolean authorizeResource(AuthorizationContext authzCtx, User user,
+      PrivilegeRequest request) throws InternalException {
     EnumSet<ImpalaAction> actions = ImpalaAction.from(request.getPrivilege());
 
     List<DBModelAuthorizable> authorizables = Lists.newArrayList(
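
The new two-step API, createAuthorizationContext() followed by per-resource
authorizeResource() calls, lets Ranger thread one audit handler through all
checks for a statement, while Sentry's context stays empty. A sketch of the
implied calling pattern, with minimal hypothetical stand-ins for the real
types (only the two methods themselves are part of this patch):

    import java.util.List;

    interface Ctx {}
    interface Checker {
      Ctx createAuthorizationContext(boolean doAudits);
      boolean authorizeResource(Ctx ctx, String user, String request);
    }

    class AuthorizeLoop {
      // One context per statement; every resource check shares it so that
      // audit events can be batched and flushed together afterwards.
      static void authorize(Checker checker, String user, List<String> requests) {
        Ctx ctx = checker.createAuthorizationContext(/* doAudits */ true);
        for (String request : requests) {
          if (!checker.authorizeResource(ctx, user, request)) {
            throw new RuntimeException("not authorized: " + request);
          }
        }
      }
    }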
diff --git a/fe/src/test/java/org/apache/impala/authorization/AuthorizationStmtTest.java b/fe/src/test/java/org/apache/impala/authorization/AuthorizationStmtTest.java
index b151a56..cc1ce76 100644
--- a/fe/src/test/java/org/apache/impala/authorization/AuthorizationStmtTest.java
+++ b/fe/src/test/java/org/apache/impala/authorization/AuthorizationStmtTest.java
@@ -17,130 +17,46 @@
 
 package org.apache.impala.authorization;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import com.sun.jersey.api.client.ClientResponse;
+import com.google.common.collect.Sets;
 import org.apache.commons.lang.ArrayUtils;
 import org.apache.impala.analysis.AnalysisContext;
 import org.apache.impala.analysis.AnalysisContext.AnalysisResult;
-import org.apache.impala.authorization.ranger.RangerAuthorizationChecker;
-import org.apache.impala.authorization.ranger.RangerAuthorizationConfig;
-import org.apache.impala.authorization.ranger.RangerAuthorizationFactory;
-import org.apache.impala.authorization.ranger.RangerCatalogdAuthorizationManager;
-import org.apache.impala.authorization.ranger.RangerImpalaPlugin;
-import org.apache.impala.authorization.ranger.RangerImpalaResourceBuilder;
-import org.apache.impala.authorization.sentry.SentryAuthorizationConfig;
-import org.apache.impala.authorization.sentry.SentryAuthorizationFactory;
-import org.apache.impala.authorization.sentry.SentryPolicyService;
-import org.apache.impala.catalog.Role;
 import org.apache.impala.catalog.ScalarFunction;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
-import org.apache.impala.common.FrontendTestBase;
 import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.RuntimeEnv;
-import org.apache.impala.service.Frontend;
-import org.apache.impala.testutil.ImpaladTestCatalog;
-import org.apache.impala.thrift.TColumnValue;
 import org.apache.impala.thrift.TDescribeOutputStyle;
-import org.apache.impala.thrift.TDescribeResult;
-import org.apache.impala.thrift.TFunctionBinaryType;
-import org.apache.impala.thrift.TPrincipalType;
-import org.apache.impala.thrift.TPrivilege;
 import org.apache.impala.thrift.TPrivilegeLevel;
-import org.apache.impala.thrift.TPrivilegeScope;
 import org.apache.impala.thrift.TQueryOptions;
-import org.apache.impala.thrift.TResultRow;
 import org.apache.impala.thrift.TTableName;
-import org.apache.ranger.plugin.util.GrantRevokeRequest;
-import org.apache.ranger.plugin.util.RangerRESTClient;
-import org.apache.ranger.plugin.util.RangerRESTUtils;
 import org.apache.sentry.api.service.thrift.TSentryRole;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Sets;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
-import javax.ws.rs.core.Response.Status.Family;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * This class contains authorization tests for SQL statements.
  */
 @RunWith(Parameterized.class)
-public class AuthorizationStmtTest extends FrontendTestBase {
-  private static final String RANGER_ADMIN_URL = "http://localhost:6080";
-  private static final String RANGER_USER = "admin";
-  private static final String RANGER_PASSWORD = "admin";
-  private static final String SERVER_NAME = "server1";
-  private static final User USER = new User(System.getProperty("user.name"));
-  private static final String RANGER_SERVICE_TYPE = "hive";
-  private static final String RANGER_SERVICE_NAME = "test_impala";
-  private static final String RANGER_APP_ID = "impala";
-  private static final User RANGER_ADMIN = new User("admin");
-
-  private final AuthorizationConfig authzConfig_;
-  private final AuthorizationFactory authzFactory_;
-  private final AuthorizationProvider authzProvider_;
-  private final AnalysisContext authzCtx_;
-  private final SentryPolicyService sentryService_;
-  private final ImpaladTestCatalog authzCatalog_;
-  private final Frontend authzFrontend_;
-  private final RangerImpalaPlugin rangerImpalaPlugin_;
-  private final RangerRESTClient rangerRestClient_;
-
+public class AuthorizationStmtTest extends AuthorizationTestBase {
   public AuthorizationStmtTest(AuthorizationProvider authzProvider)
       throws ImpalaException {
-    authzProvider_ = authzProvider;
-    switch (authzProvider) {
-      case SENTRY:
-        authzConfig_ = SentryAuthorizationConfig.createHadoopGroupAuthConfig(
-            "server1",
-            System.getenv("IMPALA_HOME") + "/fe/src/test/resources/sentry-site.xml");
-        authzFactory_ = new SentryAuthorizationFactory(authzConfig_);
-        authzCtx_ = createAnalysisCtx(authzFactory_, USER.getName());
-        authzCatalog_ = new ImpaladTestCatalog(authzFactory_);
-        authzFrontend_ = new Frontend(authzFactory_, authzCatalog_);
-        sentryService_ = new SentryPolicyService(
-            ((SentryAuthorizationConfig) authzConfig_).getSentryConfig());
-        rangerImpalaPlugin_ = null;
-        rangerRestClient_ = null;
-        break;
-      case RANGER:
-        authzConfig_ = new RangerAuthorizationConfig(RANGER_SERVICE_TYPE, RANGER_APP_ID,
-            SERVER_NAME);
-        authzFactory_ = new RangerAuthorizationFactory(authzConfig_);
-        authzCtx_ = createAnalysisCtx(authzFactory_, USER.getName());
-        authzCatalog_ = new ImpaladTestCatalog(authzFactory_);
-        authzFrontend_ = new Frontend(authzFactory_, authzCatalog_);
-        rangerImpalaPlugin_ =
-            ((RangerAuthorizationChecker) authzFrontend_.getAuthzChecker())
-                .getRangerImpalaPlugin();
-        sentryService_ = null;
-        rangerRestClient_ = new RangerRESTClient(RANGER_ADMIN_URL, null);
-        rangerRestClient_.setBasicAuthInfo(RANGER_USER, RANGER_PASSWORD);
-        break;
-      default:
-        throw new IllegalArgumentException(String.format(
-            "Unsupported authorization provider: %s", authzProvider));
-    }
+    super(authzProvider);
   }
 
   @BeforeClass
@@ -3059,622 +2975,6 @@ public class AuthorizationStmtTest extends FrontendTestBase {
     }
   }
 
-  private void createRangerPolicy(String policyName, String json) {
-    ClientResponse response = rangerRestClient_
-        .getResource("/service/public/v2/api/policy")
-        .accept(RangerRESTUtils.REST_MIME_TYPE_JSON)
-        .type(RangerRESTUtils.REST_MIME_TYPE_JSON)
-        .post(ClientResponse.class, json);
-    if (response.getStatusInfo().getFamily() != Family.SUCCESSFUL) {
-      throw new RuntimeException(
-          String.format("Unable to create a Ranger policy: %s.", policyName));
-    }
-  }
-
-  private void deleteRangerPolicy(String policyName) {
-    ClientResponse response = rangerRestClient_
-        .getResource("/service/public/v2/api/policy")
-        .queryParam("servicename", RANGER_SERVICE_NAME)
-        .queryParam("policyname", policyName)
-        .delete(ClientResponse.class);
-    if (response.getStatusInfo().getFamily() != Family.SUCCESSFUL) {
-      throw new RuntimeException(
-          String.format("Unable to delete Ranger policy: %s.", policyName));
-    }
-  }
-
-  // Convert TDescribeResult to list of strings.
-  private static List<String> resultToStringList(TDescribeResult result) {
-    List<String> list = new ArrayList<>();
-    for (TResultRow row: result.getResults()) {
-      for (TColumnValue col: row.getColVals()) {
-        list.add(col.getString_val() == null ? "NULL": col.getString_val().trim());
-      }
-    }
-    return list;
-  }
-
-  private static String selectError(String object) {
-    return "User '%s' does not have privileges to execute 'SELECT' on: " + object;
-  }
-
-  private static String insertError(String object) {
-    return "User '%s' does not have privileges to execute 'INSERT' on: " + object;
-  }
-
-  private static String createError(String object) {
-    return "User '%s' does not have privileges to execute 'CREATE' on: " + object;
-  }
-
-  private static String alterError(String object) {
-    return "User '%s' does not have privileges to execute 'ALTER' on: " + object;
-  }
-
-  private static String dropError(String object) {
-    return "User '%s' does not have privileges to execute 'DROP' on: " + object;
-  }
-
-  private static String accessError(String object) {
-    return accessError(false, object);
-  }
-
-  private static String accessError(boolean grantOption, String object) {
-    return "User '%s' does not have privileges" +
-        (grantOption ? " with 'GRANT OPTION'" : "") + " to access: " + object;
-  }
-
-  private static String refreshError(String object) {
-    return "User '%s' does not have privileges to execute " +
-        "'INVALIDATE METADATA/REFRESH' on: " + object;
-  }
-
-  private static String systemDbError() {
-    return "Cannot modify system database.";
-  }
-
-  private static String viewDefError(String object) {
-    return "User '%s' does not have privileges to see the definition of view '" +
-        object + "'.";
-  }
-
-  private static String createFunctionError(String object) {
-    return "User '%s' does not have privileges to CREATE functions in: " + object;
-  }
-
-  private static String dropFunctionError(String object) {
-    return "User '%s' does not have privileges to DROP functions in: " + object;
-  }
-
-  private static String columnMaskError(String object) {
-    return "Impala does not support column masking yet. Column masking is enabled on " +
-        "column: " + object;
-  }
-
-  private static String rowFilterError(String object) {
-    return "Impala does not support row filtering yet. Row filtering is enabled on " +
-        "table: " + object;
-  }
-
-  private ScalarFunction addFunction(String db, String fnName, List<Type> argTypes,
-      Type retType, String uriPath, String symbolName) {
-    ScalarFunction fn = ScalarFunction.createForTesting(db, fnName, argTypes, retType,
-        uriPath, symbolName, null, null, TFunctionBinaryType.NATIVE);
-    authzCatalog_.addFunction(fn);
-    return fn;
-  }
-
-  private ScalarFunction addFunction(String db, String fnName) {
-    return addFunction(db, fnName, new ArrayList<>(), Type.INT, "/dummy",
-        "dummy.class");
-  }
-
-  private void removeFunction(ScalarFunction fn) {
-    authzCatalog_.removeFunction(fn);
-  }
-
-  private TPrivilegeLevel[] join(TPrivilegeLevel[] level1, TPrivilegeLevel... level2) {
-    TPrivilegeLevel[] levels = new TPrivilegeLevel[level1.length + level2.length];
-    int index = 0;
-    for (TPrivilegeLevel level: level1) {
-      levels[index++] = level;
-    }
-    for (TPrivilegeLevel level: level2) {
-      levels[index++] = level;
-    }
-    return levels;
-  }
-
-  private TPrivilegeLevel[] viewMetadataPrivileges() {
-    return new TPrivilegeLevel[]{TPrivilegeLevel.ALL, TPrivilegeLevel.OWNER,
-        TPrivilegeLevel.SELECT, TPrivilegeLevel.INSERT, TPrivilegeLevel.REFRESH};
-  }
-
-  private static TPrivilegeLevel[] allExcept(TPrivilegeLevel... excludedPrivLevels) {
-    Set<TPrivilegeLevel> excludedSet = Sets.newHashSet(excludedPrivLevels);
-    List<TPrivilegeLevel> privLevels = new ArrayList<>();
-    for (TPrivilegeLevel level: TPrivilegeLevel.values()) {
-      if (!excludedSet.contains(level)) {
-        privLevels.add(level);
-      }
-    }
-    return privLevels.toArray(new TPrivilegeLevel[0]);
-  }
-
-  private interface WithPrincipal {
-    void init(TPrivilege[]... privileges) throws ImpalaException;
-    void cleanUp() throws ImpalaException;
-    String getName();
-  }
-
-  private abstract class WithSentryPrincipal implements WithPrincipal {
-    protected final String role_ = "authz_test_role";
-    protected final String user_ = USER.getName();
-
-    protected void createRole(TPrivilege[]... privileges) throws ImpalaException {
-      Role role = authzCatalog_.addRole(role_);
-      authzCatalog_.addRoleGrantGroup(role_, USER.getName());
-      for (TPrivilege[] privs: privileges) {
-        for (TPrivilege privilege: privs) {
-          privilege.setPrincipal_id(role.getId());
-          privilege.setPrincipal_type(TPrincipalType.ROLE);
-          authzCatalog_.addRolePrivilege(role_, privilege);
-        }
-      }
-    }
-
-    protected void createUser(TPrivilege[]... privileges) throws ImpalaException {
-      org.apache.impala.catalog.User user = authzCatalog_.addUser(user_);
-      for (TPrivilege[] privs: privileges) {
-        for (TPrivilege privilege: privs) {
-          privilege.setPrincipal_id(user.getId());
-          privilege.setPrincipal_type(TPrincipalType.USER);
-          authzCatalog_.addUserPrivilege(user_, privilege);
-        }
-      }
-    }
-
-    protected void dropRole() throws ImpalaException {
-      authzCatalog_.removeRole(role_);
-    }
-
-    protected void dropUser() throws ImpalaException {
-      authzCatalog_.removeUser(user_);
-    }
-  }
-
-  private class WithSentryUser extends WithSentryPrincipal {
-    @Override
-    public void init(TPrivilege[]... privileges) throws ImpalaException {
-      createUser(privileges);
-    }
-
-    @Override
-    public void cleanUp() throws ImpalaException { dropUser(); }
-
-    @Override
-    public String getName() { return user_; }
-  }
-
-  private class WithSentryRole extends WithSentryPrincipal {
-    @Override
-    public void init(TPrivilege[]... privileges) throws ImpalaException {
-      createRole(privileges);
-    }
-
-    @Override
-    public void cleanUp() throws ImpalaException { dropRole(); }
-
-    @Override
-    public String getName() { return role_; }
-  }
-
-  private abstract class WithRanger implements WithPrincipal {
-    private final List<GrantRevokeRequest> requests = new ArrayList<>();
-    private final RangerCatalogdAuthorizationManager authzManager =
-        new RangerCatalogdAuthorizationManager(() -> rangerImpalaPlugin_, null);
-
-    @Override
-    public void init(TPrivilege[]... privileges) throws ImpalaException {
-      for (TPrivilege[] privilege : privileges) {
-        requests.addAll(buildRequest(Arrays.asList(privilege))
-            .stream()
-            .peek(r -> {
-              r.setResource(updateUri(r.getResource()));
-              if (r.getAccessTypes().contains("owner")) {
-                r.getAccessTypes().remove("owner");
-                r.getAccessTypes().add("all");
-              }
-            }).collect(Collectors.toList()));
-      }
-
-      authzManager.grantPrivilege(requests);
-      rangerImpalaPlugin_.refreshPoliciesAndTags();
-    }
-
-    /**
-     * Create the {@link GrantRevokeRequest}s used for granting and revoking privileges.
-     */
-    protected abstract List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges);
-
-    @Override
-    public void cleanUp() throws ImpalaException {
-      authzManager.revokePrivilege(requests);
-    }
-
-    @Override
-    public String getName() { return USER.getName(); }
-  }
-
-  private class WithRangerUser extends WithRanger {
-    @Override
-    protected List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges) {
-      return RangerCatalogdAuthorizationManager.createGrantRevokeRequests(
-          RANGER_ADMIN.getName(), USER.getName(), Collections.emptyList(),
-          rangerImpalaPlugin_.getClusterName(), privileges);
-    }
-  }
-
-  private class WithRangerGroup extends WithRanger {
-    @Override
-    protected List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges) {
-      List<String> groups = Collections.singletonList(System.getProperty("user.name"));
-
-      return RangerCatalogdAuthorizationManager.createGrantRevokeRequests(
-          RANGER_ADMIN.getName(), null, groups,
-          rangerImpalaPlugin_.getClusterName(), privileges);
-    }
-  }
-
-  private static Map<String, String> updateUri(Map<String, String> resources) {
-    String uri = resources.get(RangerImpalaResourceBuilder.URL);
-    if (uri != null && uri.startsWith("/")) {
-      uri = "hdfs://localhost:20500" + uri;
-    }
-    resources.put(RangerImpalaResourceBuilder.URL, uri);
-
-    return resources;
-  }
-
-  private class DescribeOutput {
-    private String[] excludedStrings_ = new String[0];
-    private String[] includedStrings_ = new String[0];
-    private final TDescribeOutputStyle outputStyle_;
-
-    public DescribeOutput(TDescribeOutputStyle style) {
-      outputStyle_ = style;
-    }
-
-    /**
-     * Indicates which strings must not appear in the output of the describe statement.
-     * During validation, if one of these strings exists, an assertion is thrown.
-     *
-     * @param excluded - Array of strings that must not exist in the output.
-     * @return DescribeOutput instance.
-     */
-    public DescribeOutput excludeStrings(String[] excluded) {
-      excludedStrings_ = excluded;
-      return this;
-    }
-
-    /**
-     * Indicates which strings are required to appear in the output of the describe
-     * statement.  During validation, if any one of these strings does not exist, an
-     * assertion is thrown.
-     *
-     * @param included - Array of strings that must exist in the output.
-     * @return DescribeOutput instance.
-     */
-    public DescribeOutput includeStrings(String[] included) {
-      includedStrings_ = included;
-      return this;
-    }
-
-    public void validate(TTableName table) throws ImpalaException {
-      Preconditions.checkArgument(includedStrings_.length != 0 ||
-          excludedStrings_.length != 0,
-          "One or both of included or excluded strings must be defined.");
-      List<String> result = resultToStringList(authzFrontend_.describeTable(table,
-          outputStyle_, USER));
-      for (String str: includedStrings_) {
-        assertTrue(String.format("\"%s\" is not in the describe output.\n" +
-            "Expected : %s\n Actual   : %s", str, Arrays.toString(includedStrings_),
-            result), result.contains(str));
-      }
-      for (String str: excludedStrings_) {
-        assertTrue(String.format("\"%s\" should not be in the describe output.", str),
-            !result.contains(str));
-      }
-    }
-  }
-
-  private DescribeOutput describeOutput(TDescribeOutputStyle style) {
-    return new DescribeOutput(style);
-  }
-
-  private class AuthzTest {
-    private final AnalysisContext context_;
-    private final String stmt_;
-
-    public AuthzTest(String stmt) {
-      this(null, stmt);
-    }
-
-    public AuthzTest(AnalysisContext context, String stmt) {
-      Preconditions.checkNotNull(stmt);
-      context_ = context;
-      stmt_ = stmt;
-    }
-
-    private List<WithPrincipal> buildWithPrincipals() {
-      List<WithPrincipal> withPrincipals = new ArrayList<>();
-      switch (authzProvider_) {
-        case SENTRY:
-          withPrincipals.add(new WithSentryRole());
-          withPrincipals.add(new WithSentryUser());
-          break;
-        case RANGER:
-          withPrincipals.add(new WithRangerUser());
-          withPrincipals.add(new WithRangerGroup());
-          break;
-        default:
-          throw new IllegalArgumentException(String.format(
-              "Unsupported authorization provider: %s", authzProvider_));
-      }
-      return withPrincipals;
-    }
-
-    /**
-     * This method runs with the specified privileges.
-     */
-    public AuthzTest ok(TPrivilege[]... privileges) throws ImpalaException {
-      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
-        try {
-          withPrincipal.init(privileges);
-          if (context_ != null) {
-            authzOk(context_, stmt_, withPrincipal);
-          } else {
-            authzOk(stmt_, withPrincipal);
-          }
-        } finally {
-          withPrincipal.cleanUp();
-        }
-      }
-      return this;
-    }
-
-    /**
-     * This method runs with the specified privileges.
-     */
-    public AuthzTest okDescribe(TTableName table, DescribeOutput output,
-        TPrivilege[]... privileges) throws ImpalaException {
-      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
-        try {
-          withPrincipal.init(privileges);
-          if (context_ != null) {
-            authzOk(context_, stmt_, withPrincipal);
-          } else {
-            authzOk(stmt_, withPrincipal);
-          }
-          output.validate(table);
-        } finally {
-          withPrincipal.cleanUp();
-        }
-      }
-      return this;
-    }
-
-    /**
-     * This method runs with the specified privileges.
-     */
-    public AuthzTest error(String expectedError, TPrivilege[]... privileges)
-        throws ImpalaException {
-      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
-        try {
-          withPrincipal.init(privileges);
-          if (context_ != null) {
-            authzError(context_, stmt_, expectedError, withPrincipal);
-          } else {
-            authzError(stmt_, expectedError, withPrincipal);
-          }
-        } finally {
-          withPrincipal.cleanUp();
-        }
-      }
-      return this;
-    }
-  }
-
-  private AuthzTest authorize(String stmt) {
-    return new AuthzTest(stmt);
-  }
-
-  private AuthzTest authorize(AnalysisContext ctx, String stmt) {
-    return new AuthzTest(ctx, stmt);
-  }
-
-  private TPrivilege[] onServer(TPrivilegeLevel... levels) {
-    return onServer(false, levels);
-  }
-
-  private TPrivilege[] onServer(boolean grantOption, TPrivilegeLevel... levels) {
-    TPrivilege[] privileges = new TPrivilege[levels.length];
-    for (int i = 0; i < levels.length; i++) {
-      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.SERVER, false);
-      privileges[i].setServer_name(SERVER_NAME);
-      privileges[i].setHas_grant_opt(grantOption);
-    }
-    return privileges;
-  }
-
-  private TPrivilege[] onDatabase(String db, TPrivilegeLevel... levels) {
-    return onDatabase(false, db, levels);
-  }
-
-  private TPrivilege[] onDatabase(boolean grantOption, String db,
-      TPrivilegeLevel... levels) {
-    TPrivilege[] privileges = new TPrivilege[levels.length];
-    for (int i = 0; i < levels.length; i++) {
-      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.DATABASE, false);
-      privileges[i].setServer_name(SERVER_NAME);
-      privileges[i].setDb_name(db);
-      privileges[i].setHas_grant_opt(grantOption);
-    }
-    return privileges;
-  }
-
-  private TPrivilege[] onTable(String db, String table, TPrivilegeLevel... levels) {
-    return onTable(false, db, table, levels);
-  }
-
-  private TPrivilege[] onTable(boolean grantOption, String db, String table,
-      TPrivilegeLevel... levels) {
-    TPrivilege[] privileges = new TPrivilege[levels.length];
-    for (int i = 0; i < levels.length; i++) {
-      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.TABLE, false);
-      privileges[i].setServer_name(SERVER_NAME);
-      privileges[i].setDb_name(db);
-      privileges[i].setTable_name(table);
-      privileges[i].setHas_grant_opt(grantOption);
-    }
-    return privileges;
-  }
-
-  private TPrivilege[] onColumn(String db, String table, String column,
-      TPrivilegeLevel... levels) {
-    return onColumn(db, table, new String[]{column}, levels);
-  }
-
-  private TPrivilege[] onColumn(boolean grantOption, String db, String table,
-      String column, TPrivilegeLevel... levels) {
-    return onColumn(grantOption, db, table, new String[]{column}, levels);
-  }
-
-  private TPrivilege[] onColumn(String db, String table, String[] columns,
-      TPrivilegeLevel... levels) {
-    return onColumn(false, db, table, columns, levels);
-  }
-
-  private TPrivilege[] onColumn(boolean grantOption, String db, String table,
-      String[] columns, TPrivilegeLevel... levels) {
-    int size = columns.length * levels.length;
-    TPrivilege[] privileges = new TPrivilege[size];
-    int idx = 0;
-    for (int i = 0; i < levels.length; i++) {
-      for (String column: columns) {
-        privileges[idx] = new TPrivilege(levels[i], TPrivilegeScope.COLUMN, false);
-        privileges[idx].setServer_name(SERVER_NAME);
-        privileges[idx].setDb_name(db);
-        privileges[idx].setTable_name(table);
-        privileges[idx].setColumn_name(column);
-        privileges[idx].setHas_grant_opt(grantOption);
-        idx++;
-      }
-    }
-    return privileges;
-  }
-
-  private TPrivilege[] onUri(String uri, TPrivilegeLevel... levels) {
-    return onUri(false, uri, levels);
-  }
-
-  private TPrivilege[] onUri(boolean grantOption, String uri, TPrivilegeLevel... levels) {
-    TPrivilege[] privileges = new TPrivilege[levels.length];
-    for (int i = 0; i < levels.length; i++) {
-      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.URI, false);
-      privileges[i].setServer_name(SERVER_NAME);
-      privileges[i].setUri(uri);
-      privileges[i].setHas_grant_opt(grantOption);
-    }
-    return privileges;
-  }
-
-  private void authzOk(String stmt, WithPrincipal withPrincipal) throws ImpalaException {
-    authzOk(authzCtx_, stmt, withPrincipal);
-  }
-
-  private void authzOk(AnalysisContext context, String stmt, WithPrincipal withPrincipal)
-      throws ImpalaException {
-    authzOk(authzFrontend_, context, stmt, withPrincipal);
-  }
-
-  private void authzOk(Frontend fe, AnalysisContext context, String stmt,
-      WithPrincipal withPrincipal) throws ImpalaException {
-    try {
-      parseAndAnalyze(stmt, context, fe);
-    } catch (AuthorizationException e) {
-      // Because the same test can be called from multiple statements
-      // it is useful to know which statement caused the exception.
-      throw new AuthorizationException(String.format(
-          "\nPrincipal: %s\nStatement: %s\nError: %s", withPrincipal.getName(),
-          stmt, e.getMessage(), e));
-    }
-  }
-
-  /**
-   * Verifies that a given statement fails authorization and the expected error
-   * string matches.
-   */
-  private void authzError(String stmt, String expectedError, Matcher matcher,
-      WithPrincipal withPrincipal) throws ImpalaException {
-    authzError(authzCtx_, stmt, expectedError, matcher, withPrincipal);
-  }
-
-  private void authzError(String stmt, String expectedError, WithPrincipal withPrincipal)
-      throws ImpalaException {
-    authzError(authzCtx_, stmt, expectedError, startsWith(), withPrincipal);
-  }
-
-  private void authzError(AnalysisContext ctx, String stmt, String expectedError,
-      Matcher matcher, WithPrincipal withPrincipal) throws ImpalaException {
-    authzError(authzFrontend_, ctx, stmt, expectedError, matcher, withPrincipal);
-  }
-
-  private void authzError(AnalysisContext ctx, String stmt, String expectedError,
-      WithPrincipal withPrincipal) throws ImpalaException {
-    authzError(authzFrontend_, ctx, stmt, expectedError, startsWith(), withPrincipal);
-  }
-
-  private interface Matcher {
-    boolean match(String actual, String expected);
-  }
-
-  private static Matcher exact() {
-    return new Matcher() {
-      @Override
-      public boolean match(String actual, String expected) {
-        return actual.equals(expected);
-      }
-    };
-  }
-
-  private static Matcher startsWith() {
-    return new Matcher() {
-      @Override
-      public boolean match(String actual, String expected) {
-        return actual.startsWith(expected);
-      }
-    };
-  }
-
-  private void authzError(Frontend fe, AnalysisContext ctx, String stmt,
-      String expectedErrorString, Matcher matcher, WithPrincipal withPrincipal)
-      throws ImpalaException {
-    Preconditions.checkNotNull(expectedErrorString);
-    try {
-      parseAndAnalyze(stmt, ctx, fe);
-    } catch (AuthorizationException e) {
-      // Insert the username into the error.
-      expectedErrorString = String.format(expectedErrorString, ctx.getUser());
-      String errorString = e.getMessage();
-      assertTrue(
-          "got error:\n" + errorString + "\nexpected:\n" + expectedErrorString,
-          matcher.match(errorString, expectedErrorString));
-      return;
-    }
-    fail(String.format("Statement did not result in authorization error.\n" +
-        "Principal: %s\nStatement: %s", withPrincipal.getName(), stmt));
-  }
-
   private void verifyPrivilegeReqs(String stmt, Set<String> expectedPrivilegeNames)
       throws ImpalaException {
     verifyPrivilegeReqs(createAnalysisCtx(authzFactory_), stmt, expectedPrivilegeNames);
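
With the shared helpers now inherited from AuthorizationTestBase (added
below), a typical check in this class reads like the following sketch, written
as it would appear inside a test method, with an example table name:

    // Passes with suitable privileges, fails with none.
    authorize("select * from functional.alltypes")
        .ok(onServer(TPrivilegeLevel.ALL))
        .ok(onTable("functional", "alltypes", TPrivilegeLevel.SELECT))
        .error(selectError("functional.alltypes"));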
diff --git a/fe/src/test/java/org/apache/impala/authorization/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/authorization/AuthorizationTest.java
index b8ab93b..b03adb4 100644
--- a/fe/src/test/java/org/apache/impala/authorization/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/authorization/AuthorizationTest.java
@@ -438,8 +438,6 @@ public class AuthorizationTest extends FrontendTestBase {
     for (User user: users) {
       AnalysisContext ctx = createAnalysisCtx(
           new SentryAuthorizationFactory(AUTHZ_CONFIG), user.getName());
-//    for (User user : users) {
-//      AnalysisContext ctx = createAnalysisCtx(AUTHZ_CONFIG, user.getName());
 
       // Can select from table that user has privileges on.
       AuthzOk(ctx, "select * from functional.alltypesagg");
diff --git a/fe/src/test/java/org/apache/impala/authorization/AuthorizationTestBase.java b/fe/src/test/java/org/apache/impala/authorization/AuthorizationTestBase.java
new file mode 100644
index 0000000..bd3b8cc
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/authorization/AuthorizationTestBase.java
@@ -0,0 +1,727 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
+import com.sun.jersey.api.client.ClientResponse;
+import org.apache.impala.analysis.AnalysisContext;
+import org.apache.impala.authorization.ranger.RangerAuthorizationChecker;
+import org.apache.impala.authorization.ranger.RangerAuthorizationConfig;
+import org.apache.impala.authorization.ranger.RangerAuthorizationFactory;
+import org.apache.impala.authorization.ranger.RangerCatalogdAuthorizationManager;
+import org.apache.impala.authorization.ranger.RangerImpalaPlugin;
+import org.apache.impala.authorization.ranger.RangerImpalaResourceBuilder;
+import org.apache.impala.authorization.sentry.SentryAuthorizationConfig;
+import org.apache.impala.authorization.sentry.SentryAuthorizationFactory;
+import org.apache.impala.authorization.sentry.SentryPolicyService;
+import org.apache.impala.catalog.Role;
+import org.apache.impala.catalog.ScalarFunction;
+import org.apache.impala.catalog.Type;
+import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.testutil.ImpaladTestCatalog;
+import org.apache.impala.thrift.TColumnValue;
+import org.apache.impala.thrift.TDescribeOutputStyle;
+import org.apache.impala.thrift.TDescribeResult;
+import org.apache.impala.thrift.TFunctionBinaryType;
+import org.apache.impala.thrift.TPrincipalType;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.impala.thrift.TPrivilegeScope;
+import org.apache.impala.thrift.TResultRow;
+import org.apache.impala.thrift.TTableName;
+import org.apache.ranger.plugin.util.GrantRevokeRequest;
+import org.apache.ranger.plugin.util.RangerRESTClient;
+import org.apache.ranger.plugin.util.RangerRESTUtils;
+
+import javax.ws.rs.core.Response.Status.Family;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * Base class for authorization tests.
+ */
+public abstract class AuthorizationTestBase extends FrontendTestBase {
+  protected static final String RANGER_ADMIN_URL = "http://localhost:6080";
+  protected static final String RANGER_USER = "admin";
+  protected static final String RANGER_PASSWORD = "admin";
+  protected static final String SERVER_NAME = "server1";
+  protected static final User USER = new User(System.getProperty("user.name"));
+  protected static final String RANGER_SERVICE_TYPE = "hive";
+  protected static final String RANGER_SERVICE_NAME = "test_impala";
+  protected static final String RANGER_APP_ID = "impala";
+  protected static final User RANGER_ADMIN = new User("admin");
+
+  protected final AuthorizationConfig authzConfig_;
+  protected final AuthorizationFactory authzFactory_;
+  protected final AuthorizationProvider authzProvider_;
+  protected final AnalysisContext authzCtx_;
+  protected final SentryPolicyService sentryService_;
+  protected final ImpaladTestCatalog authzCatalog_;
+  protected final Frontend authzFrontend_;
+  protected final RangerImpalaPlugin rangerImpalaPlugin_;
+  protected final RangerRESTClient rangerRestClient_;
+
+  public AuthorizationTestBase(AuthorizationProvider authzProvider)
+      throws ImpalaException {
+    authzProvider_ = authzProvider;
+    switch (authzProvider) {
+      case SENTRY:
+        authzConfig_ = SentryAuthorizationConfig.createHadoopGroupAuthConfig(
+            "server1",
+            System.getenv("IMPALA_HOME") + "/fe/src/test/resources/sentry-site.xml");
+        authzFactory_ = createAuthorizationFactory(authzProvider);
+        authzCtx_ = createAnalysisCtx(authzFactory_, USER.getName());
+        authzCatalog_ = new ImpaladTestCatalog(authzFactory_);
+        authzFrontend_ = new Frontend(authzFactory_, authzCatalog_);
+        sentryService_ = new SentryPolicyService(
+            ((SentryAuthorizationConfig) authzConfig_).getSentryConfig());
+        rangerImpalaPlugin_ = null;
+        rangerRestClient_ = null;
+        break;
+      case RANGER:
+        authzConfig_ = new RangerAuthorizationConfig(RANGER_SERVICE_TYPE, RANGER_APP_ID,
+            SERVER_NAME);
+        authzFactory_ = createAuthorizationFactory(authzProvider);
+        authzCtx_ = createAnalysisCtx(authzFactory_, USER.getName());
+        authzCatalog_ = new ImpaladTestCatalog(authzFactory_);
+        authzFrontend_ = new Frontend(authzFactory_, authzCatalog_);
+        rangerImpalaPlugin_ =
+            ((RangerAuthorizationChecker) authzFrontend_.getAuthzChecker())
+                .getRangerImpalaPlugin();
+        sentryService_ = null;
+        rangerRestClient_ = new RangerRESTClient(RANGER_ADMIN_URL, null);
+        rangerRestClient_.setBasicAuthInfo(RANGER_USER, RANGER_PASSWORD);
+        break;
+      default:
+        throw new IllegalArgumentException(String.format(
+            "Unsupported authorization provider: %s", authzProvider));
+    }
+  }
+
+  protected AuthorizationFactory createAuthorizationFactory(
+      AuthorizationProvider authzProvider) {
+    return authzProvider == AuthorizationProvider.SENTRY ?
+        new SentryAuthorizationFactory(authzConfig_) :
+        new RangerAuthorizationFactory(authzConfig_);
+  }
+
+  protected interface WithPrincipal {
+    void init(TPrivilege[]... privileges) throws ImpalaException;
+    void cleanUp() throws ImpalaException;
+    String getName();
+  }
+
+  protected abstract class WithSentryPrincipal implements WithPrincipal {
+    protected final String role_ = "authz_test_role";
+    protected final String user_ = USER.getName();
+
+    protected void createRole(TPrivilege[]... privileges) throws ImpalaException {
+      Role role = authzCatalog_.addRole(role_);
+      authzCatalog_.addRoleGrantGroup(role_, USER.getName());
+      for (TPrivilege[] privs: privileges) {
+        for (TPrivilege privilege: privs) {
+          privilege.setPrincipal_id(role.getId());
+          privilege.setPrincipal_type(TPrincipalType.ROLE);
+          authzCatalog_.addRolePrivilege(role_, privilege);
+        }
+      }
+    }
+
+    protected void createUser(TPrivilege[]... privileges) throws ImpalaException {
+      org.apache.impala.catalog.User user = authzCatalog_.addUser(user_);
+      for (TPrivilege[] privs: privileges) {
+        for (TPrivilege privilege: privs) {
+          privilege.setPrincipal_id(user.getId());
+          privilege.setPrincipal_type(TPrincipalType.USER);
+          authzCatalog_.addUserPrivilege(user_, privilege);
+        }
+      }
+    }
+
+    protected void dropRole() throws ImpalaException {
+      authzCatalog_.removeRole(role_);
+    }
+
+    protected void dropUser() throws ImpalaException {
+      authzCatalog_.removeUser(user_);
+    }
+  }
+
+  public class WithSentryUser extends WithSentryPrincipal {
+    @Override
+    public void init(TPrivilege[]... privileges) throws ImpalaException {
+      createUser(privileges);
+    }
+
+    @Override
+    public void cleanUp() throws ImpalaException { dropUser(); }
+
+    @Override
+    public String getName() { return user_; }
+  }
+
+  public class WithSentryRole extends WithSentryPrincipal {
+    @Override
+    public void init(TPrivilege[]... privileges) throws ImpalaException {
+      createRole(privileges);
+    }
+
+    @Override
+    public void cleanUp() throws ImpalaException { dropRole(); }
+
+    @Override
+    public String getName() { return role_; }
+  }
+
+  protected abstract class WithRanger implements WithPrincipal {
+    private final List<GrantRevokeRequest> requests = new ArrayList<>();
+    private final RangerCatalogdAuthorizationManager authzManager =
+        new RangerCatalogdAuthorizationManager(() -> rangerImpalaPlugin_, null);
+
+    @Override
+    public void init(TPrivilege[]... privileges) throws ImpalaException {
+      for (TPrivilege[] privilege : privileges) {
+        requests.addAll(buildRequest(Arrays.asList(privilege))
+            .stream()
+            .peek(r -> {
+              r.setResource(updateUri(r.getResource()));
+              if (r.getAccessTypes().contains("owner")) {
+                r.getAccessTypes().remove("owner");
+                r.getAccessTypes().add("all");
+              }
+            }).collect(Collectors.toList()));
+      }
+
+      authzManager.grantPrivilege(requests);
+      rangerImpalaPlugin_.refreshPoliciesAndTags();
+    }
+
+    /**
+     * Create the {@link GrantRevokeRequest}s used for granting and revoking privileges.
+     */
+    protected abstract List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges);
+
+    @Override
+    public void cleanUp() throws ImpalaException {
+      authzManager.revokePrivilege(requests);
+    }
+
+    @Override
+    public String getName() { return USER.getName(); }
+  }
+
+  public class WithRangerUser extends WithRanger {
+    @Override
+    protected List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges) {
+      return RangerCatalogdAuthorizationManager.createGrantRevokeRequests(
+          RANGER_ADMIN.getName(), USER.getName(), Collections.emptyList(),
+          rangerImpalaPlugin_.getClusterName(), privileges);
+    }
+  }
+
+  public class WithRangerGroup extends WithRanger {
+    @Override
+    protected List<GrantRevokeRequest> buildRequest(List<TPrivilege> privileges) {
+      List<String> groups = Collections.singletonList(System.getProperty("user.name"));
+
+      return RangerCatalogdAuthorizationManager.createGrantRevokeRequests(
+          RANGER_ADMIN.getName(), null, groups,
+          rangerImpalaPlugin_.getClusterName(), privileges);
+    }
+  }
+
+  private static Map<String, String> updateUri(Map<String, String> resources) {
+    String uri = resources.get(RangerImpalaResourceBuilder.URL);
+    if (uri != null && uri.startsWith("/")) {
+      uri = "hdfs://localhost:20500" + uri;
+    }
+    resources.put(RangerImpalaResourceBuilder.URL, uri);
+
+    return resources;
+  }
+
+  protected class DescribeOutput {
+    private String[] excludedStrings_ = new String[0];
+    private String[] includedStrings_ = new String[0];
+    private final TDescribeOutputStyle outputStyle_;
+
+    public DescribeOutput(TDescribeOutputStyle style) {
+      outputStyle_ = style;
+    }
+
+    /**
+     * Indicates which strings must not appear in the output of the describe statement.
+     * During validation, the presence of any of these strings fails an assertion.
+     *
+     * @param excluded - Array of strings that must not exist in the output.
+     * @return DescribeOutput instance.
+     */
+    public DescribeOutput excludeStrings(String[] excluded) {
+      excludedStrings_ = excluded;
+      return this;
+    }
+
+    /**
+     * Indicates which strings are required to appear in the output of the describe
+     * statement. During validation, the absence of any of these strings fails an
+     * assertion.
+     *
+     * @param included - Array of strings that must exist in the output.
+     * @return DescribeOutput instance.
+     */
+    public DescribeOutput includeStrings(String[] included) {
+      includedStrings_ = included;
+      return this;
+    }
+
+    public void validate(TTableName table) throws ImpalaException {
+      Preconditions.checkArgument(includedStrings_.length != 0 ||
+              excludedStrings_.length != 0,
+          "One or both of included or excluded strings must be defined.");
+      List<String> result = resultToStringList(authzFrontend_.describeTable(table,
+          outputStyle_, USER));
+      for (String str: includedStrings_) {
+        assertTrue(String.format("\"%s\" is not in the describe output.\n" +
+                "Expected : %s\n Actual   : %s", str, Arrays.toString(includedStrings_),
+            result), result.contains(str));
+      }
+      for (String str: excludedStrings_) {
+        assertTrue(String.format("\"%s\" should not be in the describe output.", str),
+            !result.contains(str));
+      }
+    }
+  }
+
+  protected DescribeOutput describeOutput(TDescribeOutputStyle style) {
+    return new DescribeOutput(style);
+  }
+
+  protected List<WithPrincipal> buildWithPrincipals() {
+    List<WithPrincipal> withPrincipals = new ArrayList<>();
+    switch (authzProvider_) {
+      case SENTRY:
+        withPrincipals.add(new WithSentryRole());
+        withPrincipals.add(new WithSentryUser());
+        break;
+      case RANGER:
+        withPrincipals.add(new WithRangerUser());
+        withPrincipals.add(new WithRangerGroup());
+        break;
+      default:
+        throw new IllegalArgumentException(String.format(
+            "Unsupported authorization provider: %s", authzProvider_));
+    }
+    return withPrincipals;
+  }
+
+  protected class AuthzTest {
+    private final AnalysisContext context_;
+    private final String stmt_;
+
+    public AuthzTest(String stmt) {
+      this(null, stmt);
+    }
+
+    public AuthzTest(AnalysisContext context, String stmt) {
+      Preconditions.checkNotNull(stmt);
+      context_ = context;
+      stmt_ = stmt;
+    }
+
+    /**
+     * Runs the statement with the given privileges, expecting it to pass authorization.
+     */
+    public AuthzTest ok(TPrivilege[]... privileges)
+        throws ImpalaException {
+      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
+        try {
+          withPrincipal.init(privileges);
+          if (context_ != null) {
+            authzOk(context_, stmt_, withPrincipal);
+          } else {
+            authzOk(stmt_, withPrincipal);
+          }
+        } finally {
+          withPrincipal.cleanUp();
+        }
+      }
+      return this;
+    }
+
+    /**
+     * Runs the statement, expecting it to pass, then validates the describe output.
+     */
+    public AuthzTest okDescribe(TTableName table, DescribeOutput output,
+        TPrivilege[]... privileges) throws ImpalaException {
+      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
+        try {
+          withPrincipal.init(privileges);
+          if (context_ != null) {
+            authzOk(context_, stmt_, withPrincipal);
+          } else {
+            authzOk(stmt_, withPrincipal);
+          }
+          output.validate(table);
+        } finally {
+          withPrincipal.cleanUp();
+        }
+      }
+      return this;
+    }
+
+    /**
+     * Runs the statement with the given privileges, expecting the given error.
+     */
+    public AuthzTest error(String expectedError, TPrivilege[]... privileges)
+        throws ImpalaException {
+      for (WithPrincipal withPrincipal: buildWithPrincipals()) {
+        try {
+          withPrincipal.init(privileges);
+          if (context_ != null) {
+            authzError(context_, stmt_, expectedError, withPrincipal);
+          } else {
+            authzError(stmt_, expectedError, withPrincipal);
+          }
+        } finally {
+          withPrincipal.cleanUp();
+        }
+      }
+      return this;
+    }
+  }
+
+  protected AuthzTest authorize(String stmt) {
+    return new AuthzTest(stmt);
+  }
+
+  protected AuthzTest authorize(AnalysisContext ctx, String stmt) {
+    return new AuthzTest(ctx, stmt);
+  }
+
+  protected TPrivilege[] onServer(TPrivilegeLevel... levels) {
+    return onServer(false, levels);
+  }
+
+  protected TPrivilege[] onServer(boolean grantOption, TPrivilegeLevel... levels) {
+    TPrivilege[] privileges = new TPrivilege[levels.length];
+    for (int i = 0; i < levels.length; i++) {
+      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.SERVER, false);
+      privileges[i].setServer_name(SERVER_NAME);
+      privileges[i].setHas_grant_opt(grantOption);
+    }
+    return privileges;
+  }
+
+  protected TPrivilege[] onDatabase(String db, TPrivilegeLevel... levels) {
+    return onDatabase(false, db, levels);
+  }
+
+  protected TPrivilege[] onDatabase(boolean grantOption, String db,
+      TPrivilegeLevel... levels) {
+    TPrivilege[] privileges = new TPrivilege[levels.length];
+    for (int i = 0; i < levels.length; i++) {
+      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.DATABASE, false);
+      privileges[i].setServer_name(SERVER_NAME);
+      privileges[i].setDb_name(db);
+      privileges[i].setHas_grant_opt(grantOption);
+    }
+    return privileges;
+  }
+
+  protected TPrivilege[] onTable(String db, String table, TPrivilegeLevel... levels) {
+    return onTable(false, db, table, levels);
+  }
+
+  protected TPrivilege[] onTable(boolean grantOption, String db, String table,
+      TPrivilegeLevel... levels) {
+    TPrivilege[] privileges = new TPrivilege[levels.length];
+    for (int i = 0; i < levels.length; i++) {
+      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.TABLE, false);
+      privileges[i].setServer_name(SERVER_NAME);
+      privileges[i].setDb_name(db);
+      privileges[i].setTable_name(table);
+      privileges[i].setHas_grant_opt(grantOption);
+    }
+    return privileges;
+  }
+
+  protected TPrivilege[] onColumn(String db, String table, String column,
+      TPrivilegeLevel... levels) {
+    return onColumn(db, table, new String[]{column}, levels);
+  }
+
+  protected TPrivilege[] onColumn(boolean grantOption, String db, String table,
+      String column, TPrivilegeLevel... levels) {
+    return onColumn(grantOption, db, table, new String[]{column}, levels);
+  }
+
+  protected TPrivilege[] onColumn(String db, String table, String[] columns,
+      TPrivilegeLevel... levels) {
+    return onColumn(false, db, table, columns, levels);
+  }
+
+  protected TPrivilege[] onColumn(boolean grantOption, String db, String table,
+      String[] columns, TPrivilegeLevel... levels) {
+    int size = columns.length * levels.length;
+    TPrivilege[] privileges = new TPrivilege[size];
+    int idx = 0;
+    for (int i = 0; i < levels.length; i++) {
+      for (String column: columns) {
+        privileges[idx] = new TPrivilege(levels[i], TPrivilegeScope.COLUMN, false);
+        privileges[idx].setServer_name(SERVER_NAME);
+        privileges[idx].setDb_name(db);
+        privileges[idx].setTable_name(table);
+        privileges[idx].setColumn_name(column);
+        privileges[idx].setHas_grant_opt(grantOption);
+        idx++;
+      }
+    }
+    return privileges;
+  }
+
+  protected TPrivilege[] onUri(String uri, TPrivilegeLevel... levels) {
+    return onUri(false, uri, levels);
+  }
+
+  protected TPrivilege[] onUri(boolean grantOption, String uri,
+      TPrivilegeLevel... levels) {
+    TPrivilege[] privileges = new TPrivilege[levels.length];
+    for (int i = 0; i < levels.length; i++) {
+      privileges[i] = new TPrivilege(levels[i], TPrivilegeScope.URI, false);
+      privileges[i].setServer_name(SERVER_NAME);
+      privileges[i].setUri(uri);
+      privileges[i].setHas_grant_opt(grantOption);
+    }
+    return privileges;
+  }
+
+  private void authzOk(String stmt, WithPrincipal withPrincipal) throws ImpalaException {
+    authzOk(authzCtx_, stmt, withPrincipal);
+  }
+
+  private void authzOk(AnalysisContext context, String stmt, WithPrincipal withPrincipal)
+      throws ImpalaException {
+    try {
+      parseAndAnalyze(stmt, context, authzFrontend_);
+    } catch (AuthorizationException e) {
+      // Because the same helper can be invoked with multiple statements,
+      // it is useful to know which statement caused the exception.
+      throw new AuthorizationException(String.format(
+          "\nPrincipal: %s\nStatement: %s\nError: %s", withPrincipal.getName(),
+          stmt, e.getMessage(), e));
+    }
+  }
+
+  /**
+   * Verifies that a given statement fails authorization and the expected error
+   * string matches.
+   */
+  private void authzError(String stmt, String expectedError,
+      WithPrincipal withPrincipal) throws ImpalaException {
+    authzError(authzCtx_, stmt, expectedError, startsWith(), withPrincipal);
+  }
+
+  private void authzError(AnalysisContext context, String stmt, String expectedError,
+      WithPrincipal withPrincipal) throws ImpalaException {
+    authzError(context, stmt, expectedError, startsWith(), withPrincipal);
+  }
+
+  @FunctionalInterface
+  private interface Matcher {
+    boolean match(String actual, String expected);
+  }
+
+  private static Matcher exact() {
+    return (actual, expected) -> actual.equals(expected);
+  }
+
+  private static Matcher startsWith() {
+    return (actual, expected) -> actual.startsWith(expected);
+  }
+
+  private void authzError(AnalysisContext ctx, String stmt,
+      String expectedErrorString, Matcher matcher, WithPrincipal withPrincipal)
+      throws ImpalaException {
+    Preconditions.checkNotNull(expectedErrorString);
+    try {
+      parseAndAnalyze(stmt, ctx, authzFrontend_);
+    } catch (AuthorizationException e) {
+      // Insert the username into the error.
+      expectedErrorString = String.format(expectedErrorString, ctx.getUser());
+      String errorString = e.getMessage();
+      assertTrue(
+          "got error:\n" + errorString + "\nexpected:\n" + expectedErrorString,
+          matcher.match(errorString, expectedErrorString));
+      return;
+    }
+    fail(String.format("Statement did not result in authorization error.\n" +
+        "Principal: %s\nStatement: %s", withPrincipal.getName(), stmt));
+  }
+
+  protected void createRangerPolicy(String policyName, String json) {
+    ClientResponse response = rangerRestClient_
+        .getResource("/service/public/v2/api/policy")
+        .accept(RangerRESTUtils.REST_MIME_TYPE_JSON)
+        .type(RangerRESTUtils.REST_MIME_TYPE_JSON)
+        .post(ClientResponse.class, json);
+    if (response.getStatusInfo().getFamily() != Family.SUCCESSFUL) {
+      throw new RuntimeException(
+          String.format("Unable to create a Ranger policy: %s.", policyName));
+    }
+  }
+
+  protected void deleteRangerPolicy(String policyName) {
+    ClientResponse response = rangerRestClient_
+        .getResource("/service/public/v2/api/policy")
+        .queryParam("servicename", RANGER_SERVICE_NAME)
+        .queryParam("policyname", policyName)
+        .delete(ClientResponse.class);
+    if (response.getStatusInfo().getFamily() != Family.SUCCESSFUL) {
+      throw new RuntimeException(
+          String.format("Unable to delete Ranger policy: %s.", policyName));
+    }
+  }
+
+  // Convert TDescribeResult to list of strings.
+  private static List<String> resultToStringList(TDescribeResult result) {
+    List<String> list = new ArrayList<>();
+    for (TResultRow row: result.getResults()) {
+      for (TColumnValue col: row.getColVals()) {
+        list.add(col.getString_val() == null ? "NULL": col.getString_val().trim());
+      }
+    }
+    return list;
+  }
+
+  protected static String selectError(String object) {
+    return "User '%s' does not have privileges to execute 'SELECT' on: " + object;
+  }
+
+  protected static String insertError(String object) {
+    return "User '%s' does not have privileges to execute 'INSERT' on: " + object;
+  }
+
+  protected static String createError(String object) {
+    return "User '%s' does not have privileges to execute 'CREATE' on: " + object;
+  }
+
+  protected static String alterError(String object) {
+    return "User '%s' does not have privileges to execute 'ALTER' on: " + object;
+  }
+
+  protected static String dropError(String object) {
+    return "User '%s' does not have privileges to execute 'DROP' on: " + object;
+  }
+
+  protected static String accessError(String object) {
+    return accessError(false, object);
+  }
+
+  protected static String accessError(boolean grantOption, String object) {
+    return "User '%s' does not have privileges" +
+        (grantOption ? " with 'GRANT OPTION'" : "") + " to access: " + object;
+  }
+
+  protected static String refreshError(String object) {
+    return "User '%s' does not have privileges to execute " +
+        "'INVALIDATE METADATA/REFRESH' on: " + object;
+  }
+
+  protected static String systemDbError() {
+    return "Cannot modify system database.";
+  }
+
+  protected static String viewDefError(String object) {
+    return "User '%s' does not have privileges to see the definition of view '" +
+        object + "'.";
+  }
+
+  protected static String createFunctionError(String object) {
+    return "User '%s' does not have privileges to CREATE functions in: " + object;
+  }
+
+  protected static String dropFunctionError(String object) {
+    return "User '%s' does not have privileges to DROP functions in: " + object;
+  }
+
+  protected static String columnMaskError(String object) {
+    return "Impala does not support column masking yet. Column masking is enabled on " +
+        "column: " + object;
+  }
+
+  protected static String rowFilterError(String object) {
+    return "Impala does not support row filtering yet. Row filtering is enabled on " +
+        "table: " + object;
+  }
+
+  protected ScalarFunction addFunction(String db, String fnName, List<Type> argTypes,
+      Type retType, String uriPath, String symbolName) {
+    ScalarFunction fn = ScalarFunction.createForTesting(db, fnName, argTypes, retType,
+        uriPath, symbolName, null, null, TFunctionBinaryType.NATIVE);
+    authzCatalog_.addFunction(fn);
+    return fn;
+  }
+
+  protected ScalarFunction addFunction(String db, String fnName) {
+    return addFunction(db, fnName, new ArrayList<>(), Type.INT, "/dummy",
+        "dummy.class");
+  }
+
+  protected void removeFunction(ScalarFunction fn) {
+    authzCatalog_.removeFunction(fn);
+  }
+
+  protected TPrivilegeLevel[] join(TPrivilegeLevel[] level1, TPrivilegeLevel... level2) {
+    TPrivilegeLevel[] levels = new TPrivilegeLevel[level1.length + level2.length];
+    int index = 0;
+    for (TPrivilegeLevel level: level1) {
+      levels[index++] = level;
+    }
+    for (TPrivilegeLevel level: level2) {
+      levels[index++] = level;
+    }
+    return levels;
+  }
+
+  protected TPrivilegeLevel[] viewMetadataPrivileges() {
+    return new TPrivilegeLevel[]{TPrivilegeLevel.ALL, TPrivilegeLevel.OWNER,
+        TPrivilegeLevel.SELECT, TPrivilegeLevel.INSERT, TPrivilegeLevel.REFRESH};
+  }
+
+  protected static TPrivilegeLevel[] allExcept(TPrivilegeLevel... excludedPrivLevels) {
+    Set<TPrivilegeLevel> excludedSet = Sets.newHashSet(excludedPrivLevels);
+    List<TPrivilegeLevel> privLevels = new ArrayList<>();
+    for (TPrivilegeLevel level: TPrivilegeLevel.values()) {
+      if (!excludedSet.contains(level)) {
+        privLevels.add(level);
+      }
+    }
+    return privLevels.toArray(new TPrivilegeLevel[0]);
+  }
+}
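
For orientation, a minimal usage sketch of the helpers above (hypothetical and
not part of the patch; the statement, table, and privilege are illustrative).
Note that authzError() is private to this class, so the sketch would live in
AuthorizationTestBase itself; authzError() substitutes the principal's user
name for the '%s' placeholder in the expected message:

    // Hypothetical test method added to AuthorizationTestBase:
    @Test
    public void testSelectRequiresPrivilege() throws ImpalaException {
      for (WithPrincipal withPrincipal : buildWithPrincipals()) {
        // Expect SELECT on functional.alltypes to be rejected for every
        // configured principal when no privilege has been granted.
        authzError("select * from functional.alltypes",
            selectError("functional.alltypes"), withPrincipal);
      }
    }
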
diff --git a/fe/src/test/java/org/apache/impala/authorization/ranger/RangerAuditLogTest.java b/fe/src/test/java/org/apache/impala/authorization/ranger/RangerAuditLogTest.java
new file mode 100644
index 0000000..9521995
--- /dev/null
+++ b/fe/src/test/java/org/apache/impala/authorization/ranger/RangerAuditLogTest.java
@@ -0,0 +1,196 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.authorization.ranger;
+
+import org.apache.impala.authorization.AuthorizationChecker;
+import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizationContext;
+import org.apache.impala.authorization.AuthorizationFactory;
+import org.apache.impala.authorization.AuthorizationPolicy;
+import org.apache.impala.authorization.AuthorizationProvider;
+import org.apache.impala.authorization.AuthorizationTestBase;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.thrift.TPrivilege;
+import org.apache.impala.thrift.TPrivilegeLevel;
+import org.apache.ranger.audit.model.AuthzAuditEvent;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static org.junit.Assert.assertEquals;
+
+public class RangerAuditLogTest extends AuthorizationTestBase {
+  private RangerAuthorizationCheckerSpy authzChecker_;
+
+  private static class RangerAuthorizationCheckerSpy extends RangerAuthorizationChecker {
+    private AuthorizationContext authzCtx_;
+
+    public RangerAuthorizationCheckerSpy(AuthorizationConfig authzConfig) {
+      super(authzConfig);
+    }
+
+    @Override
+    public void postAuthorize(AuthorizationContext authzCtx) {
+      super.postAuthorize(authzCtx);
+      authzCtx_ = authzCtx;
+    }
+  }
+
+  public RangerAuditLogTest() throws ImpalaException {
+    super(AuthorizationProvider.RANGER);
+  }
+
+  @Test
+  public void testAuditLogSuccess() throws ImpalaException {
+    authzOk(events -> {
+      assertEquals(1, events.size());
+      assertEventEquals("@database", "create", "test_db", 1, events.get(0));
+    }, "create database test_db", onServer(TPrivilegeLevel.CREATE));
+
+    authzOk(events -> {
+      assertEquals(1, events.size());
+      assertEventEquals("@table", "create", "functional/test_tbl", 1, events.get(0));
+    }, "create table functional.test_tbl(i int)", onDatabase("functional",
+        TPrivilegeLevel.CREATE));
+
+    authzOk(events -> {
+      assertEquals(2, events.size());
+      assertEventEquals("@udf", "create", "functional/f()", 1, events.get(0));
+      assertEventEquals("@url", "all",
+          "hdfs://localhost:20500/test-warehouse/libTestUdfs.so", 1, events.get(1));
+    }, "create function functional.f() returns int location " +
+        "'hdfs://localhost:20500/test-warehouse/libTestUdfs.so' symbol='NoArgs'",
+        onDatabase("functional", TPrivilegeLevel.CREATE),
+        onUri("hdfs://localhost:20500/test-warehouse/libTestUdfs.so",
+            TPrivilegeLevel.ALL));
+
+    authzOk(events -> {
+      assertEquals(2, events.size());
+      assertEventEquals("@table", "create", "functional/new_table", 1, events.get(0));
+      assertEventEquals("@url", "all", "hdfs://localhost:20500/test-warehouse/new_table",
+          1, events.get(1));
+    }, "create table functional.new_table(i int) location " +
+        "'hdfs://localhost:20500/test-warehouse/new_table'",
+        onDatabase("functional", TPrivilegeLevel.CREATE),
+        onUri("hdfs://localhost:20500/test-warehouse/new_table", TPrivilegeLevel.ALL));
+
+    authzOk(events -> {
+      // Only the table event.
+      assertEquals(1, events.size());
+      assertEventEquals("@table", "select", "functional/alltypes", 1, events.get(0));
+    }, "select id, string_col from functional.alltypes",
+        onTable("functional", "alltypes", TPrivilegeLevel.SELECT));
+
+    authzOk(events -> {
+      // Only the column events. We don't want to log the failing table event used for
+      // short-circuiting.
+      assertEquals(2, events.size());
+      assertEventEquals("@column", "select", "functional/alltypes/id", 1, events.get(0));
+      assertEventEquals("@column", "select", "functional/alltypes/string_col", 1,
+          events.get(1));
+    }, "select id, string_col from functional.alltypes",
+        onColumn("functional", "alltypes", "id", TPrivilegeLevel.SELECT),
+        onColumn("functional", "alltypes", "string_col", TPrivilegeLevel.SELECT));
+
+    authzOk(events -> {
+      assertEquals(3, events.size());
+      assertEventEquals("@column", "refresh", "*/*/*", 1, events.get(0));
+      assertEventEquals("@udf", "refresh", "*/*", 1, events.get(1));
+      assertEventEquals("@url", "refresh", "*", 1, events.get(2));
+    }, "invalidate metadata", onServer(TPrivilegeLevel.REFRESH));
+  }
+
+  @Test
+  public void testAuditLogFailure() throws ImpalaException {
+    authzError(events -> {
+      assertEquals(1, events.size());
+      assertEventEquals("@database", "create", "test_db", 0, events.get(0));
+    }, "create database test_db");
+
+    authzError(events -> {
+      assertEquals(1, events.size());
+      assertEventEquals("@table", "create", "functional/test_tbl", 0, events.get(0));
+    }, "create table functional.test_tbl(i int)");
+
+    authzError(events -> {
+      // Only log the first failure.
+      assertEquals(1, events.size());
+      assertEventEquals("@udf", "create", "functional/f()", 0, events.get(0));
+    }, "create function functional.f() returns int location " +
+        "'hdfs://localhost:20500/test-warehouse/libTestUdfs.so' symbol='NoArgs'");
+
+    authzError(events -> {
+      assertEquals(1, events.size());
+      assertEventEquals("@url", "all", "hdfs://localhost:20500/test-warehouse/new_table",
+          0, events.get(0));
+    }, "create table functional.new_table(i int) location " +
+        "'hdfs://localhost:20500/test-warehouse/new_table'",
+        onDatabase("functional", TPrivilegeLevel.CREATE));
+
+    authzError(events -> {
+      // Only log the first column event. We do not log the table access used for
+      // short-circuiting.
+      assertEquals(1, events.size());
+      assertEventEquals("@column", "select", "functional/alltypes/id", 0, events.get(0));
+    }, "select id, string_col from functional.alltypes");
+  }
+
+  private void authzOk(Consumer<List<AuthzAuditEvent>> resultChecker, String stmt,
+      TPrivilege[]... privileges) throws ImpalaException {
+    authorize(stmt).ok(privileges);
+    RangerAuthorizationContext rangerCtx =
+        (RangerAuthorizationContext) authzChecker_.authzCtx_;
+    resultChecker.accept(rangerCtx.getAuditHandler().getAuthzEvents());
+  }
+
+  private void authzError(Consumer<List<AuthzAuditEvent>> resultChecker, String stmt,
+      TPrivilege[]... privileges) throws ImpalaException {
+    authorize(stmt).error("", privileges);
+    RangerAuthorizationContext rangerCtx =
+        (RangerAuthorizationContext) authzChecker_.authzCtx_;
+    resultChecker.accept(rangerCtx.getAuditHandler().getAuthzEvents());
+  }
+
+  private static void assertEventEquals(String resourceType, String accessType,
+      String resourcePath, int accessResult, AuthzAuditEvent event) {
+    assertEquals(resourceType, event.getResourceType());
+    assertEquals(accessType, event.getAccessType());
+    assertEquals(resourcePath, event.getResourcePath());
+    assertEquals(accessResult, event.getAccessResult());
+  }
+
+  @Override
+  protected List<WithPrincipal> buildWithPrincipals() {
+    return Collections.singletonList(new WithRangerUser());
+  }
+
+  @Override
+  protected AuthorizationFactory createAuthorizationFactory(
+      AuthorizationProvider authzProvider) {
+    return new RangerAuthorizationFactory(authzConfig_) {
+      @Override
+      public AuthorizationChecker newAuthorizationChecker(
+          AuthorizationPolicy authzPolicy) {
+        authzChecker_ = new RangerAuthorizationCheckerSpy(authzConfig_);
+        return authzChecker_;
+      }
+    };
+  }
+}
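
As a hedged illustration (hypothetical, not part of the patch), a further test
in this class could capture and check a single audit event in the same style;
the "drop" access-type string is an assumption, while onTable() and
assertEventEquals() come from the code above:

    @Test
    public void testAuditLogDropTable() throws ImpalaException {
      authzOk(events -> {
        assertEquals(1, events.size());
        // Assumed access-type string for DROP; the resource path follows the
        // "db/table" convention used in the tests above.
        assertEventEquals("@table", "drop", "functional/alltypes", 1,
            events.get(0));
      }, "drop table functional.alltypes",
          onTable("functional", "alltypes", TPrivilegeLevel.DROP));
    }
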
diff --git a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
index 65a93c0..66a3393 100644
--- a/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
+++ b/fe/src/test/java/org/apache/impala/common/FrontendTestBase.java
@@ -36,6 +36,7 @@ import org.apache.impala.analysis.StmtMetadataLoader;
 import org.apache.impala.analysis.StmtMetadataLoader.StmtTableCache;
 import org.apache.impala.authorization.AuthorizationChecker;
 import org.apache.impala.authorization.AuthorizationConfig;
+import org.apache.impala.authorization.AuthorizationContext;
 import org.apache.impala.authorization.AuthorizationException;
 import org.apache.impala.authorization.AuthorizationFactory;
 import org.apache.impala.authorization.AuthorizationManager;
@@ -336,8 +337,8 @@ public class FrontendTestBase extends AbstractFrontendTest {
         AuthorizationConfig authzConfig = getAuthorizationConfig();
         return new BaseAuthorizationChecker(authzConfig) {
           @Override
-          protected boolean authorize(User user, PrivilegeRequest request)
-              throws InternalException {
+          protected boolean authorizeResource(AuthorizationContext authzCtx, User user,
+              PrivilegeRequest request) throws InternalException {
             return authorized;
           }
 
@@ -354,6 +355,11 @@ public class FrontendTestBase extends AbstractFrontendTest {
 
           @Override
           public void invalidateAuthorizationCache() {}
+
+          @Override
+          public AuthorizationContext createAuthorizationContext(boolean doAudits) {
+            return new AuthorizationContext();
+          }
         };
       }
 


[impala] 07/08: IMPALA-8585: Fix for upgraded + compacted acid tables

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 5faf1745b0e27ff3f95d532ad5e7dc8e34310172
Author: Csaba Ringhofer <cs...@cloudera.com>
AuthorDate: Fri May 24 15:28:15 2019 +0200

    IMPALA-8585: Fix for upgraded + compacted acid tables
    
    Tables that already had data before being altered to ACID tables keep
    the old data in their root table/partition directory if
    hive.mm.allow.originals == true. These files should be merged into the
    base during the first compaction, so they should be read only if there
    is no valid base yet.
    
    Also added EE tests for upgraded tables.
    
    Change-Id: I062d8e76f90e0da1b954bf156208c0afb424deb1
    Reviewed-on: http://gerrit.cloudera.org:8080/13427
    Reviewed-by: Zoltan Borok-Nagy <bo...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
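
As a hedged sketch of the rule this patch implements in AcidUtils (the
standalone helper below is hypothetical; SENTINEL_BASE_WRITE_ID is the
existing AcidUtils sentinel meaning "no valid base seen"):

    // A post-upgrade file sits in the table root rather than in a base_N/ or
    // delta_M_N/ directory. It is readable only while no valid base exists;
    // once compaction writes a base, the file merely awaits deletion.
    static boolean readPostUpgradeFile(long maxBaseWriteId) {
      return maxBaseWriteId == SENTINEL_BASE_WRITE_ID;  // no valid base yet
    }
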
---
 .../java/org/apache/impala/util/AcidUtils.java     |  8 +++-
 .../java/org/apache/impala/util/AcidUtilsTest.java | 49 ++++++++++++----------
 .../queries/QueryTest/acid-compaction.test         | 31 ++++++++++++++
 .../functional-query/queries/QueryTest/acid.test   | 18 ++++++++
 tests/query_test/test_acid.py                      |  1 -
 5 files changed, 83 insertions(+), 24 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/util/AcidUtils.java b/fe/src/main/java/org/apache/impala/util/AcidUtils.java
index cb8b66f..3d294b1 100644
--- a/fe/src/main/java/org/apache/impala/util/AcidUtils.java
+++ b/fe/src/main/java/org/apache/impala/util/AcidUtils.java
@@ -224,7 +224,13 @@ public class AcidUtils {
       }
 
       // Not in a base or a delta directory. In that case, it's probably a post-upgrade
-      // file and we should include it.
+      // file.
+      // If there is no valid base, we should read the file (assuming that
+      // hive.mm.allow.originals == true).
+      // If there is a valid base, the file should have been merged into the
+      // base by compaction, so we can assume that it is no longer valid and
+      // is just waiting to be deleted.
+      if (maxBaseWriteId != SENTINEL_BASE_WRITE_ID) it.remove();
     }
     return validStats;
   }
diff --git a/fe/src/test/java/org/apache/impala/util/AcidUtilsTest.java b/fe/src/test/java/org/apache/impala/util/AcidUtilsTest.java
index 4fc72a9..0019327 100644
--- a/fe/src/test/java/org/apache/impala/util/AcidUtilsTest.java
+++ b/fe/src/test/java/org/apache/impala/util/AcidUtilsTest.java
@@ -89,8 +89,6 @@ public class AcidUtilsTest {
           "base_00000100/def.txt"},
         "default.test:10:1234:1,2,3",
         new String[]{
-          "base_01.txt",
-          "post_upgrade.txt",
           "base_0000005/abc.txt",
           "base_0000005/0000/abc.txt"});
   }
@@ -155,28 +153,35 @@ public class AcidUtilsTest {
   @Test
   public void testAcidStateNoBase() {
     assertFiltering(new String[]{
-        "delta_000005_000005_0000/",
-        "delta_000005_000005_0000/lmn.txt",
-        "base_000010/",
-        "delta_0000012_0000012_0000/",
-        "delta_0000012_0000012_0000/0000_0",
-        "delta_0000012_0000012_0000/0000_1"},
-    "", // writeIdList that accepts all transactions as valid
-    new String[]{
-        "delta_0000012_0000012_0000/0000_0",
-        "delta_0000012_0000012_0000/0000_1"});
+            "base_01.txt",
+            "post_upgrade.txt",
+            "delta_000005_000005_0000/",
+            "delta_000005_000005_0000/lmn.txt",
+            "base_000010/",
+            "delta_0000012_0000012_0000/",
+            "delta_0000012_0000012_0000/0000_0",
+            "delta_0000012_0000012_0000/0000_1"},
+        "", // writeIdList that accepts all transactions as valid
+        new String[]{
+            // Post upgrade files are ignored if there is a valid base.
+            "delta_0000012_0000012_0000/0000_0",
+            "delta_0000012_0000012_0000/0000_1"});
 
     // Same set of files, but no base directory.
     assertFiltering(new String[]{
-        "delta_000005_000005_0000/",
-        "delta_000005_000005_0000/lmn.txt",
-        "delta_0000012_0000012_0000/",
-        "delta_0000012_0000012_0000/0000_0",
-        "delta_0000012_0000012_0000/0000_1"},
-    "", // writeIdList that accepts all transactions as valid
-    new String[]{
-        "delta_000005_000005_0000/lmn.txt",
-        "delta_0000012_0000012_0000/0000_0",
-        "delta_0000012_0000012_0000/0000_1"});
+            "base_01.txt",
+            "post_upgrade.txt",
+            "delta_000005_000005_0000/",
+            "delta_000005_000005_0000/lmn.txt",
+            "delta_0000012_0000012_0000/",
+            "delta_0000012_0000012_0000/0000_0",
+            "delta_0000012_0000012_0000/0000_1"},
+        "", // writeIdList that accepts all transactions as valid
+        new String[]{
+            "base_01.txt", // Post upgrade files are considered valid if there is no base.
+            "post_upgrade.txt",
+            "delta_000005_000005_0000/lmn.txt",
+            "delta_0000012_0000012_0000/0000_0",
+            "delta_0000012_0000012_0000/0000_1"});
   }
 }
diff --git a/testdata/workloads/functional-query/queries/QueryTest/acid-compaction.test b/testdata/workloads/functional-query/queries/QueryTest/acid-compaction.test
index 62b6700..608c2af 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/acid-compaction.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/acid-compaction.test
@@ -37,3 +37,34 @@ Path,Size,Partition
 row_regex:'$NAMENODE/test-warehouse/$DATABASE.db/tt/base_0000003_v\d+/000000_0','\d+B',''
 ---- TYPES
 STRING,STRING,STRING
+====
+---- HIVE_QUERY
+use $DATABASE;
+create table upgraded_table (x int);
+insert into upgraded_table values (1);
+# Upgrade the table to insert-only ACID while it already contains values.
+alter table upgraded_table set tblproperties
+ ('transactional' = 'true', 'transactional_properties' = 'insert_only');
+insert into upgraded_table values (2);
+insert into upgraded_table values (3);
+====
+---- QUERY
+invalidate metadata upgraded_table;
+select * from upgraded_table;
+---- RESULTS
+1
+2
+3
+====
+---- HIVE_QUERY
+use $DATABASE;
+alter table upgraded_table compact 'major' and wait;
+====
+---- QUERY
+refresh upgraded_table;
+select * from upgraded_table;
+---- RESULTS
+1
+2
+3
+====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/acid.test b/testdata/workloads/functional-query/queries/QueryTest/acid.test
index f185d2d..3612e18 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/acid.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/acid.test
@@ -36,3 +36,21 @@ select * from tt order by x;
 3
 4
 ====
+---- HIVE_QUERY
+use $DATABASE;
+create table upgraded_table (x int);
+insert into upgraded_table values (1);
+# Upgrade the table to insert-only ACID while it already contains values.
+alter table upgraded_table set tblproperties
+ ('transactional' = 'true', 'transactional_properties' = 'insert_only');
+insert into upgraded_table values (2);
+insert into upgraded_table values (3);
+====
+---- QUERY
+invalidate metadata upgraded_table;
+select * from upgraded_table;
+---- RESULTS
+1
+2
+3
+====
diff --git a/tests/query_test/test_acid.py b/tests/query_test/test_acid.py
index 3b64c54..bd40a41 100644
--- a/tests/query_test/test_acid.py
+++ b/tests/query_test/test_acid.py
@@ -50,5 +50,4 @@ class TestAcid(ImpalaTestSuite):
 # TODO(todd): further tests to write:
 #  TRUNCATE, once HIVE-20137 is implemented.
 #  INSERT OVERWRITE with empty result set, once HIVE-21750 is fixed.
-#  Test for a post-upgrade Hive table which contains files not in ACID layout.
 #  Negative test for LOAD DATA INPATH and all other SQL that we don't support.


[impala] 02/08: IMPALA-8524: part2: Avoid calling "hive" via command line in EE tests

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 6839d9738b0d5c3c176fea4345ab623783915454
Author: Zoltan Borok-Nagy <bo...@cloudera.com>
AuthorDate: Wed May 22 17:05:25 2019 +0200

    IMPALA-8524: part2: Avoid calling "hive" via command line in EE tests
    
    "hive -e SQL..." without further parameters no longer works
    when USE_CDP_HIVE=true (it doesn't establish a connection).
    Some tests used this to load data.
    
    part2: there were some places that still called hive without
    a beeline connection.
    
    I had to break up views-compatibility.test for the different versions
    of Hive.
    
    Change-Id: Ia45b64cc1da78190e6f239a5f462308d7fa56f4b
    Reviewed-on: http://gerrit.cloudera.org:8080/13402
    Reviewed-by: Zoltan Borok-Nagy <bo...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../QueryTest/views-compatibility-hive2-only.test  | 30 ++++++++++++++++++++++
 .../QueryTest/views-compatibility-hive3-only.test  | 15 +++++++++++
 .../queries/QueryTest/views-compatibility.test     | 27 -------------------
 tests/metadata/test_views_compatibility.py         | 15 +++++++++--
 tests/query_test/test_scanners.py                  |  4 +--
 5 files changed, 60 insertions(+), 31 deletions(-)

diff --git a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive2-only.test b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive2-only.test
new file mode 100644
index 0000000..f28e9d7
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive2-only.test
@@ -0,0 +1,30 @@
+====
+---- CREATE_VIEW
+# Create a view in Impala with plan hints. Hive should recognize the hints as
+# comments and ignore them.
+create view test as
+select /* +straight_join */ a.* from functional.alltypestiny a
+inner join /* +broadcast */ functional.alltypes b on a.id = b.id
+inner join /* +shuffle */ functional.alltypessmall c on b.id = c.id;
+---- CREATE_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=FAILURE
+---- QUERY_IMPALA_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=SUCCESS
+====
+---- CREATE_VIEW
+# Create a view in Hive with plan hints. Impala should ignore the unknown hints.
+# TODO: move this to the common .test file once "HIVE-21782: Cannot use query hints
+# in views" is resolved.
+create view test as
+select /*+ MAPJOIN(alltypestiny) */ count(*) from
+functional.alltypes a inner join functional.alltypestiny b
+on (a.id = b.id);
+---- CREATE_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=SUCCESS
+---- QUERY_HIVE_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=SUCCESS
+====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive3-only.test b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive3-only.test
new file mode 100644
index 0000000..eb9abbf
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility-hive3-only.test
@@ -0,0 +1,15 @@
+====
+---- CREATE_VIEW
+# Create a view in Impala with plan hints. Hive should recognize the hints as
+# comments and ignore them.
+create view test as
+select /* +straight_join */ a.* from functional.alltypestiny a
+inner join /* +broadcast */ functional.alltypes b on a.id = b.id
+inner join /* +shuffle */ functional.alltypessmall c on b.id = c.id;
+---- CREATE_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=SUCCESS
+---- QUERY_IMPALA_VIEW_RESULTS
+IMPALA=SUCCESS
+HIVE=SUCCESS
+====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
index f90d9b4..2ca7a2d 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/views-compatibility.test
@@ -232,30 +232,3 @@ IMPALA=SUCCESS
 ---- QUERY_IMPALA_VIEW_RESULTS
 HIVE=SUCCESS
 ====
----- CREATE_VIEW
-# Create a view in Impala with plan hints. Hive should recognize the hints as
-# comments and ignore them.
-create view test as
-select /* +straight_join */ a.* from functional.alltypestiny a
-inner join /* +broadcast */ functional.alltypes b on a.id = b.id
-inner join /* +shuffle */ functional.alltypessmall c on b.id = c.id;
----- CREATE_VIEW_RESULTS
-IMPALA=SUCCESS
-HIVE=FAILURE
----- QUERY_IMPALA_VIEW_RESULTS
-IMPALA=SUCCESS
-HIVE=SUCCESS
-====
----- CREATE_VIEW
-# Create a view in Hive with plan hints. Impala should ignore the unknown hints.
-create view test as
-select /*+ MAPJOIN(alltypestiny) */ count(*) from
-functional.alltypes a inner join functional.alltypestiny b
-on (a.id = b.id);
----- CREATE_VIEW_RESULTS
-IMPALA=SUCCESS
-HIVE=SUCCESS
----- QUERY_HIVE_VIEW_RESULTS
-IMPALA=SUCCESS
-HIVE=SUCCESS
-====
diff --git a/tests/metadata/test_views_compatibility.py b/tests/metadata/test_views_compatibility.py
index a0103e2..c0716a5 100644
--- a/tests/metadata/test_views_compatibility.py
+++ b/tests/metadata/test_views_compatibility.py
@@ -22,6 +22,7 @@ import shlex
 from subprocess import call
 
 from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
+from tests.common.environ import HIVE_MAJOR_VERSION
 from tests.common.impala_test_suite import ImpalaTestSuite
 from tests.common.skip import SkipIfS3, SkipIfABFS, SkipIfADLS, SkipIfIsilon, SkipIfLocal
 from tests.common.test_dimensions import create_uncompressed_text_dimension
@@ -75,6 +76,12 @@ class TestViewCompatibility(ImpalaTestSuite):
   def test_view_compatibility(self, vector, unique_database):
     self._run_view_compat_test_case('QueryTest/views-compatibility', vector,
       unique_database)
+    if HIVE_MAJOR_VERSION == 2:
+      self._run_view_compat_test_case('QueryTest/views-compatibility-hive2-only', vector,
+          unique_database)
+    if HIVE_MAJOR_VERSION >= 3:
+      self._run_view_compat_test_case('QueryTest/views-compatibility-hive3-only', vector,
+          unique_database)
 
   def _run_view_compat_test_case(self, test_file_name, vector, test_db_name):
     """
@@ -144,8 +151,12 @@ class TestViewCompatibility(ImpalaTestSuite):
                             test_case.get_create_view_sql('IMPALA'), None)
 
   def _exec_in_hive(self, sql_str, create_view_sql, exp_res):
-    hive_ret = call(['hive', '-e', sql_str])
-    self._cmp_expected(sql_str, create_view_sql, exp_res, "HIVE", hive_ret == 0)
+    try:
+      self.run_stmt_in_hive(sql_str)
+      success = True
+    except: # consider any exception a failure
+      success = False
+    self._cmp_expected(sql_str, create_view_sql, exp_res, "HIVE", success)
 
   def _exec_in_impala(self, sql_str, create_view_sql, exp_res):
     success = True
diff --git a/tests/query_test/test_scanners.py b/tests/query_test/test_scanners.py
index 4c4a00e..5d6e8c1 100644
--- a/tests/query_test/test_scanners.py
+++ b/tests/query_test/test_scanners.py
@@ -410,8 +410,8 @@ class TestParquet(ImpalaTestSuite):
         "    float_col, double_col,date_string_col,string_col,timestamp_col" \
         "  from functional_parquet.alltypes" \
         "  where year = {year} and month = {month}" % unique_database
-    check_call(['hive', '-e', hql_format.format(codec="snappy", year=2010, month=1)])
-    check_call(['hive', '-e', hql_format.format(codec="gzip", year=2010, month=2)])
+    self.run_stmt_in_hive(hql_format.format(codec="snappy", year=2010, month=1))
+    self.run_stmt_in_hive(hql_format.format(codec="gzip", year=2010, month=2))
 
     test_files = ["testdata/multi_compression_parquet_data/tinytable_0_gzip_snappy.parq",
                   "testdata/multi_compression_parquet_data/tinytable_1_snappy_gzip.parq"]


[impala] 08/08: IMPALA-8369: Add HIVE_MAJOR_VERSION section to planner tests + some fixes

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 5ce57cafb20f4627054c6c1d0d2a79e9ae97fd78
Author: Csaba Ringhofer <cs...@cloudera.com>
AuthorDate: Thu May 23 21:49:30 2019 +0200

    IMPALA-8369: Add HIVE_MAJOR_VERSION section to planner tests + some fixes
    
    Hive 3 creates a different number of files for some tables than Hive 2,
    which broke some test cases in resource-requirements.test. The fix is
    to run different versions of these tests depending on the Hive version.
    
    This is done by adding a new section, HIVE_MAJOR_VERSION, which causes
    the given test case to be skipped if the cluster's Hive major version
    differs, e.g.:
    ---- HIVE_MAJOR_VERSION
    3
    
    Change-Id: Ied7ba7911da23cbca12149e062f4e1a444613a36
    Reviewed-on: http://gerrit.cloudera.org:8080/13414
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Csaba Ringhofer <cs...@cloudera.com>
---
 .../org/apache/impala/planner/PlannerTestBase.java |  12 ++
 .../org/apache/impala/testutil/TestFileParser.java |   3 +-
 .../queries/PlannerTest/resource-requirements.test | 140 +++++++++++++++++++++
 3 files changed, 154 insertions(+), 1 deletion(-)

diff --git a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
index dc420f8..2917b08 100644
--- a/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
+++ b/fe/src/test/java/org/apache/impala/planner/PlannerTestBase.java
@@ -876,6 +876,18 @@ public class PlannerTestBase extends FrontendTestBase {
     for (TestCase testCase : queryFileParser.getTestCases()) {
       actualOutput.append(testCase.getSectionAsString(Section.QUERY, true, "\n"));
       actualOutput.append("\n");
+
+      String neededHiveMajorVersion =
+          testCase.getSectionAsString(Section.HIVE_MAJOR_VERSION, false, "");
+      if (neededHiveMajorVersion != null && !neededHiveMajorVersion.isEmpty() &&
+          Integer.parseInt(neededHiveMajorVersion) != TestUtils.getHiveMajorVersion()) {
+        actualOutput.append("Skipping test case (needs Hive major version: ");
+        actualOutput.append(neededHiveMajorVersion);
+        actualOutput.append(")\n");
+        actualOutput.append("====\n");
+        continue;
+      }
+
       String queryOptionsSection = testCase.getSectionAsString(
           Section.QUERYOPTIONS, true, "\n");
       if (queryOptionsSection != null && !queryOptionsSection.isEmpty()) {
diff --git a/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java b/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
index fb04704..c583de8 100644
--- a/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
+++ b/fe/src/test/java/org/apache/impala/testutil/TestFileParser.java
@@ -95,7 +95,8 @@ public class TestFileParser {
     ERRORS,
     SCANRANGELOCATIONS,
     LINEAGE,
-    QUERYOPTIONS;
+    QUERYOPTIONS,
+    HIVE_MAJOR_VERSION;
 
     // Return header line for this section
     public String getHeader() {
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
index fb06170..5faf6d3 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/resource-requirements.test
@@ -1102,6 +1102,9 @@ Per-Host Resources: mem-estimate=32.00MB mem-reservation=64.00KB thread-reservat
 ====
 # Avro scan.
 select * from tpch_avro.orders
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+2
 ---- PLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
 Per-Host Resource Estimates: Memory=88MB
@@ -1180,6 +1183,88 @@ Per-Host Resources: mem-estimate=176.00MB mem-reservation=16.00MB thread-reserva
    tuple-ids=0 row-size=88B cardinality=unavailable
    in pipelines: 00(GETNEXT)
 ====
+select * from tpch_avro.orders
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+3
+---- PLAN
+Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
+Per-Host Resource Estimates: Memory=64MB
+WARNING: The following tables are missing relevant table and/or column statistics.
+tpch_avro.orders
+Analyzed query: SELECT * FROM tpch_avro.orders
+
+F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
+|  Per-Host Resources: mem-estimate=64.00MB mem-reservation=8.00MB thread-reservation=2
+PLAN-ROOT SINK
+|  mem-estimate=0B mem-reservation=0B thread-reservation=0
+|
+00:SCAN HDFS [tpch_avro.orders]
+   HDFS partitions=1/1 files=3 size=156.92MB
+   stored statistics:
+     table: rows=unavailable size=156.92MB
+     columns: unavailable
+   extrapolated-rows=disabled max-scan-range-rows=unavailable
+   mem-estimate=64.00MB mem-reservation=8.00MB thread-reservation=1
+   tuple-ids=0 row-size=88B cardinality=unavailable
+   in pipelines: 00(GETNEXT)
+---- DISTRIBUTEDPLAN
+Max Per-Host Resource Reservation: Memory=8.00MB Threads=3
+Per-Host Resource Estimates: Memory=64MB
+WARNING: The following tables are missing relevant table and/or column statistics.
+tpch_avro.orders
+Analyzed query: SELECT * FROM tpch_avro.orders
+
+F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
+|  Per-Host Resources: mem-estimate=275.97KB mem-reservation=0B thread-reservation=1
+PLAN-ROOT SINK
+|  mem-estimate=0B mem-reservation=0B thread-reservation=0
+|
+01:EXCHANGE [UNPARTITIONED]
+|  mem-estimate=275.97KB mem-reservation=0B thread-reservation=0
+|  tuple-ids=0 row-size=88B cardinality=unavailable
+|  in pipelines: 00(GETNEXT)
+|
+F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=3
+Per-Host Resources: mem-estimate=64.00MB mem-reservation=8.00MB thread-reservation=2
+00:SCAN HDFS [tpch_avro.orders, RANDOM]
+   HDFS partitions=1/1 files=3 size=156.92MB
+   stored statistics:
+     table: rows=unavailable size=156.92MB
+     columns: unavailable
+   extrapolated-rows=disabled max-scan-range-rows=unavailable
+   mem-estimate=64.00MB mem-reservation=8.00MB thread-reservation=1
+   tuple-ids=0 row-size=88B cardinality=unavailable
+   in pipelines: 00(GETNEXT)
+---- PARALLELPLANS
+Max Per-Host Resource Reservation: Memory=16.00MB Threads=5
+Per-Host Resource Estimates: Memory=129MB
+WARNING: The following tables are missing relevant table and/or column statistics.
+tpch_avro.orders
+Analyzed query: SELECT * FROM tpch_avro.orders
+
+F01:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
+|  Per-Host Resources: mem-estimate=551.97KB mem-reservation=0B thread-reservation=1
+PLAN-ROOT SINK
+|  mem-estimate=0B mem-reservation=0B thread-reservation=0
+|
+01:EXCHANGE [UNPARTITIONED]
+|  mem-estimate=551.97KB mem-reservation=0B thread-reservation=0
+|  tuple-ids=0 row-size=88B cardinality=unavailable
+|  in pipelines: 00(GETNEXT)
+|
+F00:PLAN FRAGMENT [RANDOM] hosts=3 instances=6
+Per-Host Resources: mem-estimate=128.00MB mem-reservation=16.00MB thread-reservation=4
+00:SCAN HDFS [tpch_avro.orders, RANDOM]
+   HDFS partitions=1/1 files=3 size=156.92MB
+   stored statistics:
+     table: rows=unavailable size=156.92MB
+     columns: unavailable
+   extrapolated-rows=disabled max-scan-range-rows=unavailable
+   mem-estimate=64.00MB mem-reservation=8.00MB thread-reservation=1
+   tuple-ids=0 row-size=88B cardinality=unavailable
+   in pipelines: 00(GETNEXT)
+====
 # RC scan.
 select * from tpch_rc.customer
 ---- PLAN
@@ -1342,6 +1427,9 @@ Per-Host Resources: mem-estimate=32.00MB mem-reservation=16.00MB thread-reservat
 ====
 # ORC scan
 select * from tpch_orc_def.lineitem
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+2
 ---- PLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
 Per-Host Resource Estimates: Memory=40MB
@@ -1362,8 +1450,36 @@ PLAN-ROOT SINK
    tuple-ids=0 row-size=231B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
+# ORC scan
+select * from tpch_orc_def.lineitem
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+3
+---- PLAN
+Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
+Per-Host Resource Estimates: Memory=88MB
+Analyzed query: SELECT * FROM tpch_orc_def.lineitem
+
+F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
+|  Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=2
+PLAN-ROOT SINK
+|  mem-estimate=0B mem-reservation=0B thread-reservation=0
+|
+00:SCAN HDFS [tpch_orc_def.lineitem]
+   HDFS partitions=1/1 files=1 size=142.84MB
+   stored statistics:
+     table: rows=6.00M size=142.84MB
+     columns: all
+   extrapolated-rows=disabled max-scan-range-rows=6.00M
+   mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
+   tuple-ids=0 row-size=231B cardinality=6.00M
+   in pipelines: 00(GETNEXT)
+====
 # Single column ORC scan - memory reservation is same as multi-column scan.
 select l_comment from tpch_orc_def.lineitem
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+2
 ---- PLAN
 Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
 Per-Host Resource Estimates: Memory=40MB
@@ -1384,6 +1500,30 @@ PLAN-ROOT SINK
    tuple-ids=0 row-size=38B cardinality=6.00M
    in pipelines: 00(GETNEXT)
 ====
+select l_comment from tpch_orc_def.lineitem
+---- HIVE_MAJOR_VERSION
+# Hive 3 creates a different number of files for this table than Hive 2.
+3
+---- PLAN
+Max Per-Host Resource Reservation: Memory=8.00MB Threads=2
+Per-Host Resource Estimates: Memory=88MB
+Analyzed query: SELECT l_comment FROM tpch_orc_def.lineitem
+
+F00:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
+|  Per-Host Resources: mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=2
+PLAN-ROOT SINK
+|  mem-estimate=0B mem-reservation=0B thread-reservation=0
+|
+00:SCAN HDFS [tpch_orc_def.lineitem]
+   HDFS partitions=1/1 files=1 size=142.84MB
+   stored statistics:
+     table: rows=6.00M size=142.84MB
+     columns: all
+   extrapolated-rows=disabled max-scan-range-rows=6.00M
+   mem-estimate=88.00MB mem-reservation=8.00MB thread-reservation=1
+   tuple-ids=0 row-size=38B cardinality=6.00M
+   in pipelines: 00(GETNEXT)
+====
 # ORC scan on small files - memory reservation is reduced.
 select * from functional_orc_def.alltypes
 ---- PLAN


[impala] 05/08: IMPALA-6903: Download profile from WebUI in text format

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit b00d031fb37b83924d4dfa020360f35540717501
Author: Yongzhi Chen <yc...@cloudera.com>
AuthorDate: Mon May 13 20:33:44 2019 -0400

    IMPALA-6903: Download profile from WebUI in text format
    
    Add a link called "Download Text Profile" to the profile tab.
    The link allows users to download runtime profiles as a UTF-8
    encoded file.
    The text profile is fetched from the backend via an HTTP request.
    
    Tests:
    Manually tested to check downloaded files.
    Ran all core tests.
    Add test_download_text_profile to test_web_pages.py.
    
    Change-Id: Ie030c2bb330211f51840417b9f7880f19174af7b
    Reviewed-on: http://gerrit.cloudera.org:8080/13333
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/service/impala-http-handler.cc | 19 ++++++++++++++++---
 be/src/service/impala-http-handler.h  |  9 +++++++++
 tests/webserver/test_web_pages.py     | 22 ++++++++++++++++++++++
 www/query_profile.tmpl                | 10 +++++++++-
 4 files changed, 56 insertions(+), 4 deletions(-)

diff --git a/be/src/service/impala-http-handler.cc b/be/src/service/impala-http-handler.cc
index aef22b0..3cfd65a 100644
--- a/be/src/service/impala-http-handler.cc
+++ b/be/src/service/impala-http-handler.cc
@@ -124,6 +124,9 @@ void ImpalaHttpHandler::RegisterHandlers(Webserver* webserver) {
   webserver->RegisterUrlCallback("/query_profile_encoded", "raw_text.tmpl",
       MakeCallback(this, &ImpalaHttpHandler::QueryProfileEncodedHandler), false);
 
+  webserver->RegisterUrlCallback("/query_profile_plain_text", "raw_text.tmpl",
+        MakeCallback(this, &ImpalaHttpHandler::QueryProfileTextHandler), false);
+
   webserver->RegisterUrlCallback("/inflight_query_ids", "raw_text.tmpl",
       MakeCallback(this, &ImpalaHttpHandler::InflightQueryIdsHandler), false);
 
@@ -239,8 +242,8 @@ void ImpalaHttpHandler::QueryProfileHandler(const Webserver::WebRequest& req,
   document->AddMember("query_id", query_id, document->GetAllocator());
 }
 
-void ImpalaHttpHandler::QueryProfileEncodedHandler(const Webserver::WebRequest& req,
-    Document* document) {
+void ImpalaHttpHandler::QueryProfileHelper(const Webserver::WebRequest& req,
+    Document* document, TRuntimeProfileFormat::type format) {
   TUniqueId unique_id;
   stringstream ss;
   Status status = ParseIdFromRequest(req, &unique_id, "query_id");
@@ -248,7 +251,7 @@ void ImpalaHttpHandler::QueryProfileEncodedHandler(const Webserver::WebRequest&
     ss << status.GetDetail();
   } else {
     Status status = server_->GetRuntimeProfileOutput(
-        unique_id, "", TRuntimeProfileFormat::BASE64, &ss, nullptr);
+      unique_id, "", format, &ss, nullptr);
     if (!status.ok()) {
       ss.str(Substitute("Could not obtain runtime profile: $0", status.GetDetail()));
     }
@@ -259,6 +262,16 @@ void ImpalaHttpHandler::QueryProfileEncodedHandler(const Webserver::WebRequest&
   document->AddMember("contents", profile, document->GetAllocator());
 }
 
+void ImpalaHttpHandler::QueryProfileEncodedHandler(const Webserver::WebRequest& req,
+    Document* document) {
+  QueryProfileHelper(req, document, TRuntimeProfileFormat::BASE64);
+}
+
+void ImpalaHttpHandler::QueryProfileTextHandler(const Webserver::WebRequest& req,
+    Document* document) {
+  QueryProfileHelper(req, document, TRuntimeProfileFormat::STRING);
+}
+
 void ImpalaHttpHandler::InflightQueryIdsHandler(const Webserver::WebRequest& req,
     Document* document) {
   stringstream ss;
diff --git a/be/src/service/impala-http-handler.h b/be/src/service/impala-http-handler.h
index 07530cb..ce742a4 100644
--- a/be/src/service/impala-http-handler.h
+++ b/be/src/service/impala-http-handler.h
@@ -109,11 +109,20 @@ class ImpalaHttpHandler {
   void CloseSessionHandler(const Webserver::WebRequest& req,
       rapidjson::Document* document);
 
+  /// Helper method to put the query profile in 'document' in the requested format.
+  void QueryProfileHelper(const Webserver::WebRequest& req,
+      rapidjson::Document* document, TRuntimeProfileFormat::type format);
+
   /// Upon return, 'document' will contain the query profile as a base64 encoded object in
   /// 'contents'.
   void QueryProfileEncodedHandler(const Webserver::WebRequest& req,
       rapidjson::Document* document);
 
+  /// Upon return, 'document' will contain the query profile as a utf8 string in
+  /// 'contents'.
+  void QueryProfileTextHandler(const Webserver::WebRequest& req,
+      rapidjson::Document* document);
+
   /// Produces a list of inflight query IDs printed as text in 'contents'.
   void InflightQueryIdsHandler(const Webserver::WebRequest& req,
       rapidjson::Document* document);
diff --git a/tests/webserver/test_web_pages.py b/tests/webserver/test_web_pages.py
index 2960599..581df17 100644
--- a/tests/webserver/test_web_pages.py
+++ b/tests/webserver/test_web_pages.py
@@ -542,3 +542,25 @@ class TestWebPage(ImpalaTestSuite):
     assert backend_row['is_executor']
     assert not backend_row['is_quiescing']
     assert len(backend_row['admit_mem_limit']) > 0
+
+  def test_download_text_profile(self):
+    """Test download text profile for a query"""
+    query = "select count(*) from functional.alltypes"
+    query_id = self.client.execute(query).query_id
+    profile_page_url = "{0}query_profile?query_id={1}".format(
+        self.ROOT_URL, query_id)
+    # Check that the text download link is there.
+    responses = self.get_and_check_status(
+        profile_page_url, "Download Text Profile",
+        ports_to_test=self.IMPALAD_TEST_PORT)
+    assert len(responses) == 1
+    download_link = "query_profile_plain_text?query_id={0}".format(
+        query_id)
+    assert download_link in responses[0].text
+    # Get the response from the download link and validate it by checking
+    # that the query is in the file.
+    responses = self.get_and_check_status(
+        self.ROOT_URL + download_link, query, self.IMPALAD_TEST_PORT)
+    # Check that the query id is in the content of the response.
+    assert len(responses) == 1
+    assert query_id in responses[0].text
diff --git a/www/query_profile.tmpl b/www/query_profile.tmpl
index 4103227..4f93fdb 100644
--- a/www/query_profile.tmpl
+++ b/www/query_profile.tmpl
@@ -25,11 +25,19 @@ under the License.
 {{> www/query_detail_tabs.tmpl }}
 
 <h4>
-  <a href="/query_profile_encoded?query_id={{query_id}}" download="profile_{{query_id}}">
+  <a href="/query_profile_encoded?query_id={{query_id}}"
+      download="thrift_profile_{{query_id}}">
     Download Thrift Profile
   </a>
 </h4>
 
+<h4>
+  <a href="/query_profile_plain_text?query_id={{query_id}}"
+      download="profile_{{query_id}}">
+   Download Text Profile
+  </a>
+</h4>
+
 <pre>{{profile}}</pre>
 
 <script>


[impala] 04/08: IMPALA-8248: Improve Ranger test coverage

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 377471fb4b2794caae63601f937b6c4a48ef2cd3
Author: Austin Nobis <an...@cloudera.com>
AuthorDate: Thu May 23 08:28:59 2019 -0700

    IMPALA-8248: Improve Ranger test coverage
    
    This patch adds increased coverage for the Apache Ranger integration.
    Specifically, tests were added that interact directly with Apache Ranger
    via its REST API and then assert against Impala to verify proper
    behavior.
    
    Testing:
    - Ran all E2E authorization tests
    - Added a test that adds Hive privileges to Ranger and verifies they do
      not show up in Impala.
    - Added a test that grants privileges via the Ranger REST API and runs
      "refresh authorization" to verify they exist in Impala.
    
    Change-Id: I15ce57ea96fbf6bff9bcabf7300fbadea7c55b09
    Reviewed-on: http://gerrit.cloudera.org:8080/13413
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 tests/authorization/test_ranger.py | 175 +++++++++++++++++++++++++++++++++++++
 1 file changed, 175 insertions(+)

diff --git a/tests/authorization/test_ranger.py b/tests/authorization/test_ranger.py
index c4bb2fa..83ca5e2 100644
--- a/tests/authorization/test_ranger.py
+++ b/tests/authorization/test_ranger.py
@@ -327,6 +327,181 @@ class TestRanger(CustomClusterTestSuite):
       admin_client.execute("revoke select(x) on table {0}.{1} from {2} {3}"
                            .format(unique_database, unique_table, kw, id))
 
+  @CustomClusterTestSuite.with_args(
+    impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
+  def test_grant_revoke_ranger_api(self, unique_name):
+    user = getuser()
+    admin_client = self.create_impala_client()
+    unique_db = unique_name + "_db"
+    resource = {
+      "database": unique_db,
+      "column": "*",
+      "table": "*"
+    }
+    access = ["select", "create"]
+
+    try:
+      # Create the test database
+      admin_client.execute("drop database if exists {0} cascade".format(unique_db),
+                           user=ADMIN)
+      admin_client.execute("create database {0}".format(unique_db), user=ADMIN)
+
+      # Grant privileges via Ranger REST API
+      TestRanger._grant_ranger_privilege(user, resource, access)
+
+      # Privileges should be stale before a refresh
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+      TestRanger._check_privileges(result, [])
+
+      # Refresh and check updated privileges
+      admin_client.execute("refresh authorization")
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+
+      TestRanger._check_privileges(result, [
+        ["USER", user, unique_db, "*", "*", "", "", "create", "false"],
+        ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
+      ])
+
+      # Revoke privileges via Ranger REST API
+      TestRanger._revoke_ranger_privilege(user, resource, access)
+
+      # Privileges should be stale before a refresh
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+      TestRanger._check_privileges(result, [
+        ["USER", user, unique_db, "*", "*", "", "", "create", "false"],
+        ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
+      ])
+
+      # Refresh and check updated privileges
+      admin_client.execute("refresh authorization")
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+
+      TestRanger._check_privileges(result, [])
+    finally:
+      admin_client.execute("revoke all on database {0} from user {1}"
+                           .format(unique_db, user))
+      admin_client.execute("drop database if exists {0} cascade".format(unique_db),
+                           user=ADMIN)
+
+  @CustomClusterTestSuite.with_args(
+    impalad_args=IMPALAD_ARGS, catalogd_args=CATALOGD_ARGS)
+  def test_show_grant_hive_privilege(self, unique_name):
+    user = getuser()
+    admin_client = self.create_impala_client()
+    unique_db = unique_name + "_db"
+    resource = {
+      "database": unique_db,
+      "column": "*",
+      "table": "*"
+    }
+    access = ["lock", "select"]
+
+    try:
+      TestRanger._grant_ranger_privilege(user, resource, access)
+      admin_client.execute("drop database if exists {0} cascade".format(unique_db),
+                           user=ADMIN)
+      admin_client.execute("create database {0}".format(unique_db), user=ADMIN)
+
+      admin_client.execute("refresh authorization")
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+
+      TestRanger._check_privileges(result, [
+        ["USER", user, unique_db, "*", "*", "", "", "select", "false"]
+      ])
+
+      # Assert that lock, select privilege exists in Ranger server
+      assert "lock" in TestRanger._get_ranger_privileges_db(user, unique_db)
+      assert "select" in TestRanger._get_ranger_privileges_db(user, unique_db)
+
+      admin_client.execute("revoke select on database {0} from user {1}"
+                           .format(unique_db, user))
+
+      # Assert that lock is still present and select is revoked in Ranger server
+      assert "lock" in TestRanger._get_ranger_privileges_db(user, unique_db)
+      assert "select" not in TestRanger._get_ranger_privileges_db(user, unique_db)
+
+      admin_client.execute("refresh authorization")
+      result = self.client.execute("show grant user {0} on database {1}"
+                                   .format(user, unique_db))
+
+      TestRanger._check_privileges(result, [])
+    finally:
+      admin_client.execute("drop database if exists {0} cascade".format(unique_db),
+                           user=ADMIN)
+      TestRanger._revoke_ranger_privilege(user, resource, access)
+
+  @staticmethod
+  def _grant_ranger_privilege(user, resource, access):
+    data = {
+      "grantor": ADMIN,
+      "grantorGroups": [],
+      "resource": resource,
+      "users": [user],
+      "groups": [],
+      "accessTypes": access,
+      "delegateAdmin": "false",
+      "enableAudit": "true",
+      "replaceExistingPermissions": "false",
+      "isRecursive": "false",
+      "clusterName": "server1"
+    }
+
+    headers = {"Content-Type": "application/json", "Accept": "application/json"}
+    r = requests.post("{0}/service/plugins/services/grant/test_impala?pluginId=impala"
+                      .format(RANGER_HOST),
+                      auth=RANGER_AUTH, json=data, headers=headers)
+    assert 200 <= r.status_code < 300
+
+  @staticmethod
+  def _revoke_ranger_privilege(user, resource, access):
+    data = {
+      "grantor": ADMIN,
+      "grantorGroups": [],
+      "resource": resource,
+      "users": [user],
+      "groups": [],
+      "accessTypes": access,
+      "delegateAdmin": "false",
+      "enableAudit": "true",
+      "replaceExistingPermissions": "false",
+      "isRecursive": "false",
+      "clusterName": "server1"
+    }
+
+    headers = {"Content-Type": "application/json", "Accept": "application/json"}
+    r = requests.post("{0}/service/plugins/services/revoke/test_impala?pluginId=impala"
+                      .format(RANGER_HOST),
+                      auth=RANGER_AUTH, json=data, headers=headers)
+    assert 200 <= r.status_code < 300
+
+  @staticmethod
+  def _get_ranger_privileges_db(user, db):
+    policies = TestRanger._get_ranger_privileges(user)
+    result = []
+
+    for policy in policies:
+      resources = policy["resources"]
+      if "database" in resources and db in resources["database"]["values"]:
+        for policy_items in policy["policyItems"]:
+          if user in policy_items["users"]:
+            for access in policy_items["accesses"]:
+              result.append(access["type"])
+
+    return result
+
+  @staticmethod
+  def _get_ranger_privileges(user):
+    headers = {"Content-Type": "application/json", "Accept": "application/json"}
+    r = requests.get("{0}/service/plugins/policies"
+                     .format(RANGER_HOST),
+                     auth=RANGER_AUTH, headers=headers)
+    return json.loads(r.content)["policies"]
+
   def _add_ranger_user(self, user):
     data = {"name": user, "password": "password123", "userRoleList": ["ROLE_USER"]}
     headers = {"Content-Type": "application/json", "Accept": "application/json"}