Posted to commits@impala.apache.org by bo...@apache.org on 2019/08/27 12:19:16 UTC

[impala] branch master updated (5fde84a -> 3f4cbe9)

This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 5fde84a  IMPALA-8584: Add cookie support to the HTTP HS2 server
     new 1410666  Remove redundant table name population in Kudu integration
     new 1c4bdce  IMPALA-8845: Cancel receiver's streams on exchange node's EOS
     new 0018b71  IMPALA-8760: Disable TestAdmissionControllerStress tests for CentOS 6
     new 3f4cbe9  IMPALA-8889: Fix error messages for unsupported operations on acid tables

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/exec/exchange-node.cc                       | 18 ++++--
 be/src/exec/exchange-node.h                        |  8 +++
 be/src/runtime/krpc-data-stream-mgr.cc             |  3 +-
 be/src/runtime/krpc-data-stream-recvr.cc           | 12 +++-
 be/src/runtime/krpc-data-stream-recvr.h            |  8 +--
 .../org/apache/impala/analysis/AlterTableStmt.java |  2 +-
 .../java/org/apache/impala/analysis/Analyzer.java  | 13 ++---
 .../org/apache/impala/analysis/DropStatsStmt.java  |  2 +-
 .../org/apache/impala/analysis/LoadDataStmt.java   |  2 +-
 .../org/apache/impala/analysis/TruncateStmt.java   |  2 +-
 .../java/org/apache/impala/catalog/KuduTable.java  | 25 --------
 .../org/apache/impala/analysis/AnalyzerTest.java   | 19 +++---
 .../queries/QueryTest/acid-negative.test           |  4 +-
 tests/common/environ.py                            | 12 ++--
 tests/common/skip.py                               |  8 ++-
 tests/custom_cluster/test_admission_controller.py  |  6 +-
 tests/custom_cluster/test_exchange_eos.py          | 67 ++++++++++++++++++++++
 17 files changed, 142 insertions(+), 69 deletions(-)
 create mode 100644 tests/custom_cluster/test_exchange_eos.py


[impala] 04/04: IMPALA-8889: Fix error messages for unsupported operations on acid tables

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 3f4cbe961702552a187c0bbc04b616e0029c1412
Author: Yongzhi Chen <yc...@cloudera.com>
AuthorDate: Fri Aug 23 16:17:46 2019 -0400

    IMPALA-8889: Fix error messages for unsupported operations on acid tables
    
    Provides up-to-date error messages for ACID tables.
    Makes a minor code change to ensureTableWriteSupported after
    HIVEMANAGEDINSERTWRITE is enabled.
    
    Tests:
    Fixed and tested AnalyzerTest
    Fixed acid-negative test
    Ran all core tests for Hive 3
    
    Change-Id: I732bf651405c9ed75d1843390050b786720e3ffe
    Reviewed-on: http://gerrit.cloudera.org:8080/14133
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Zoltan Borok-Nagy <bo...@cloudera.com>
---
 .../org/apache/impala/analysis/AlterTableStmt.java    |  2 +-
 .../java/org/apache/impala/analysis/Analyzer.java     | 13 ++++++-------
 .../org/apache/impala/analysis/DropStatsStmt.java     |  2 +-
 .../java/org/apache/impala/analysis/LoadDataStmt.java |  2 +-
 .../java/org/apache/impala/analysis/TruncateStmt.java |  2 +-
 .../java/org/apache/impala/analysis/AnalyzerTest.java | 19 +++++++------------
 .../queries/QueryTest/acid-negative.test              |  4 ++--
 7 files changed, 19 insertions(+), 25 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
index 47f4ca1..79c3b64 100644
--- a/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/AlterTableStmt.java
@@ -91,7 +91,7 @@ public abstract class AlterTableStmt extends StatementBase {
     Preconditions.checkState(tableRef instanceof BaseTableRef);
     table_ = tableRef.getTable();
     analyzer.checkTableCapability(table_, Analyzer.OperationType.WRITE);
-    analyzer.ensureTableNotTransactional(table_);
+    analyzer.ensureTableNotTransactional(table_, "ALTER TABLE");
     if (table_ instanceof FeDataSourceTable
         && !(this instanceof AlterTableSetColumnStats)) {
       throw new AnalysisException(String.format(
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index 75c1fbe..3936722 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -145,8 +145,7 @@ public class Analyzer {
       "Table %s not supported. Transactional (ACID) tables are " +
       "only supported when they are configured as insert_only.";
   private static final String TRANSACTIONAL_TABLE_NOT_SUPPORTED =
-      "Table %s not supported. Transactional (ACID) tables are " +
-      "only supported for read.";
+      "%s not supported on transactional (ACID) table: %s" ;
   private static final String BUCKETED_TABLE_NOT_SUPPORTED =
       "%s is a bucketed table. Only read operations are supported on such tables.";
   private static final String TABLE_NOT_SUPPORTED =
@@ -236,11 +235,11 @@ public class Analyzer {
         table.getFullName());
   }
 
-  public static void ensureTableNotTransactional(FeTable table)
+  public static void ensureTableNotTransactional(FeTable table, String operationStr)
       throws AnalysisException {
     if (AcidUtils.isTransactionalTable(table.getMetaStoreTable().getParameters())) {
       throw new AnalysisException(String.format(TRANSACTIONAL_TABLE_NOT_SUPPORTED,
-          table.getFullName()));
+          operationStr, table.getFullName()));
     }
   }
 
@@ -292,13 +291,13 @@ public class Analyzer {
       if (KuduTable.isKuduTable(table.getMetaStoreTable())) return;
       if (!MetastoreShim.hasTableCapability(table.getMetaStoreTable(), writeRequires)) {
         // Error messages with explanations.
-        ensureTableNotTransactional(table);
+        ensureTableNotFullAcid(table);
         throw new AnalysisException(String.format(TABLE_NOT_SUPPORTED, "Write",
             table.getFullName(),
             MetastoreShim.getTableAccessType(table.getMetaStoreTable())));
       }
     } else {
-      ensureTableNotTransactional(table);
+      ensureTableNotTransactional(table, "Write");
     }
   }
 
@@ -322,7 +321,7 @@ public class Analyzer {
             MetastoreShim.getTableAccessType(table.getMetaStoreTable())));
       }
     } else {
-      ensureTableNotTransactional(table);
+      ensureTableNotTransactional(table, "Operation");
     }
   }
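
To make the new message shape concrete, here is a minimal Python sketch that mirrors the Java String.format call above. The template and the operation strings come from the patch; the helper function itself is purely illustrative:

    # Mirrors Analyzer.TRANSACTIONAL_TABLE_NOT_SUPPORTED after this patch.
    TRANSACTIONAL_TABLE_NOT_SUPPORTED = (
        "%s not supported on transactional (ACID) table: %s")

    def format_acid_error(operation, table_name):
        # Illustrative helper; the Java code calls String.format directly.
        return TRANSACTIONAL_TABLE_NOT_SUPPORTED % (operation, table_name)

    print(format_acid_error("ALTER TABLE",
                            "functional.insert_only_transactional_table"))
    # ALTER TABLE not supported on transactional (ACID) table:
    #   functional.insert_only_transactional_table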
 
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
index 6ec1868..de91981 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropStatsStmt.java
@@ -103,7 +103,7 @@ public class DropStatsStmt extends StatementBase {
     }
     tableRef_.analyze(analyzer);
     // There is no transactional HMS API to drop stats at the moment (HIVE-22104).
-    analyzer.ensureTableNotTransactional(tableRef_.getTable());
+    analyzer.ensureTableNotTransactional(tableRef_.getTable(), "DROP STATS");
     if (partitionSet_ != null) {
       partitionSet_.setTableName(tableRef_.getTable().getTableName());
       partitionSet_.setPrivilegeRequirement(Privilege.ALTER);
diff --git a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
index ba3d01a..90ab829 100644
--- a/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/LoadDataStmt.java
@@ -110,7 +110,7 @@ public class LoadDataStmt extends StatementBase {
           dbName_ + "." + getTbl());
     }
     analyzer.checkTableCapability(table, Analyzer.OperationType.WRITE);
-    analyzer.ensureTableNotTransactional(table);
+    analyzer.ensureTableNotTransactional(table, "LOAD DATA");
 
     // Analyze the partition spec, if one was specified.
     if (partitionSpec_ != null) {
diff --git a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
index 15c0a3e..b08b3ca 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TruncateStmt.java
@@ -68,7 +68,7 @@ public class TruncateStmt extends StatementBase {
           "TRUNCATE TABLE not supported on non-HDFS table: %s", table_.getFullName()));
     }
     analyzer.checkTableCapability(table_, Analyzer.OperationType.WRITE);
-    analyzer.ensureTableNotTransactional(table_);
+    analyzer.ensureTableNotTransactional(table_, "TRUNCATE TABLE");
   }
 
   @Override
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
index 029f28d..4c042fe 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzerTest.java
@@ -540,13 +540,8 @@ public class AnalyzerTest extends FrontendTestBase {
       "Table functional_orc_def.full_transactional_table not supported. Transactional (ACID)" +
           " tables are only supported when they are configured as insert_only.";
 
-    String insertOnlyErrorMsg =
-      "Table functional.insert_only_transactional_table not supported. " +
-          "Transactional (ACID) tables are only supported for read.";
-
-    String insertOnlyErrorForFullMsg =
-      "Table functional_orc_def.full_transactional_table not supported. " +
-          "Transactional (ACID) tables are only supported for read.";
+    String insertOnlyErrorMsg = "%s not supported on " +
+      "transactional (ACID) table: functional.insert_only_transactional_table";
 
     AnalysisError(
         "create table test as select * from functional_orc_def.full_transactional_table",
@@ -580,14 +575,14 @@ public class AnalyzerTest extends FrontendTestBase {
 
     AnalysisError(
         "drop table functional_orc_def.full_transactional_table",
-        insertOnlyErrorForFullMsg);
+        errorMsg);
     AnalyzesOk("drop table functional.insert_only_transactional_table");
 
     AnalysisError(
         "truncate table functional_orc_def.full_transactional_table",
-        insertOnlyErrorForFullMsg);
+        errorMsg);
     AnalysisError("truncate table functional.insert_only_transactional_table",
-        insertOnlyErrorMsg);
+        String.format(insertOnlyErrorMsg, "TRUNCATE TABLE"));
 
     AnalysisError(
         "alter table functional_orc_def.full_transactional_table " +
@@ -596,13 +591,13 @@ public class AnalyzerTest extends FrontendTestBase {
     AnalysisError(
         "alter table functional.insert_only_transactional_table " +
             "add columns (col2 string)",
-        insertOnlyErrorMsg);
+        String.format(insertOnlyErrorMsg, "ALTER TABLE"));
 
     AnalysisError(
         "drop stats functional_orc_def.full_transactional_table",
         errorMsg);
     AnalysisError("drop stats functional.insert_only_transactional_table",
-        insertOnlyErrorMsg);
+        String.format(insertOnlyErrorMsg, "DROP STATS"));
 
     AnalyzesOk("describe functional.insert_only_transactional_table");
     AnalyzesOk("describe functional_orc_def.full_transactional_table");
diff --git a/testdata/workloads/functional-query/queries/QueryTest/acid-negative.test b/testdata/workloads/functional-query/queries/QueryTest/acid-negative.test
index 826622b..cfd8416 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/acid-negative.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/acid-negative.test
@@ -2,12 +2,12 @@
 ---- QUERY
 alter table functional.insert_only_transactional_table change column x y bigint;
 ---- CATCH
-AnalysisException: Table functional.insert_only_transactional_table not supported. Transactional (ACID) tables are only supported for read.
+AnalysisException: ALTER TABLE not supported on transactional (ACID) table: functional.insert_only_transactional_table
 ====
 ---- QUERY
 drop stats functional.insert_only_transactional_table;
 ---- CATCH
-AnalysisException: Table functional.insert_only_transactional_table not supported. Transactional (ACID) tables are only supported for read.
+AnalysisException: DROP STATS not supported on transactional (ACID) table: functional.insert_only_transactional_table
 ====
 ---- QUERY
 select * from functional_orc_def.full_transactional_table;


[impala] 03/04: IMPALA-8760: Disable TestAdmissionControllerStress tests for CentOS 6

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 0018b710f46f6f8551bf356225add8a12817a82f
Author: Bikramjeet Vig <bi...@cloudera.com>
AuthorDate: Thu Aug 22 11:43:09 2019 -0700

    IMPALA-8760: Disable TestAdmissionControllerStress tests for CentOS 6
    
    This test is tuned for specific timing, which makes it flaky when run
    on CentOS 6 where that timing is a bit off. Since running it on a
    different OS provides no additional coverage, the test is disabled for
    CentOS 6.
    
    Change-Id: If63799f880f0883532467a00e362105a78878f17
    Reviewed-on: http://gerrit.cloudera.org:8080/14124
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 tests/common/environ.py                           | 12 ++++++++----
 tests/common/skip.py                              |  8 +++++++-
 tests/custom_cluster/test_admission_controller.py |  6 +++++-
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/tests/common/environ.py b/tests/common/environ.py
index 36b4a8a..cbdadff 100644
--- a/tests/common/environ.py
+++ b/tests/common/environ.py
@@ -46,11 +46,15 @@ if os.path.isfile(IMPALA_LOCAL_VERSION_INFO):
     raise Exception("Could not find VERSION in {0}".format(IMPALA_LOCAL_VERSION_INFO))
 
 # Check if it is Red Hat/CentOS Linux
-dist = platform.linux_distribution()[0].lower()
-if dist.find('centos') or dist.find('red hat'):
+distribution = platform.linux_distribution()
+distname = distribution[0].lower()
+version = distribution[1]
+IS_REDHAT_6_DERIVATIVE = False
+IS_REDHAT_DERIVATIVE = False
+if distname.find('centos') or distname.find('red hat'):
   IS_REDHAT_DERIVATIVE = True
-else:
-  IS_REDHAT_DERIVATIVE = False
+  if len(re.findall('^6\.*', version)) > 0:
+    IS_REDHAT_6_DERIVATIVE = True
 
 # Find the likely BuildType of the running Impala. Assume it's found through the path
 # $IMPALA_HOME/be/build/latest as a fallback.
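
One subtlety in the environ.py hunk above: str.find() returns -1 (which is truthy) when the substring is absent and 0 (falsy) when it is a prefix, so the condition "distname.find('centos') or distname.find('red hat')" evaluates true for effectively every distribution. A minimal sketch of the pitfall next to the usual membership test; the distribution names are just sample inputs:

    for distname in ['centos linux', 'ubuntu']:
        # find() returns -1 when the substring is absent, and -1 is truthy.
        if distname.find('centos') or distname.find('red hat'):
            print('%s matched via find()' % distname)  # fires for both inputs
        # The idiomatic membership test matches only the Red Hat family.
        if 'centos' in distname or 'red hat' in distname:
            print('%s matched via in' % distname)  # fires only for centos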
diff --git a/tests/common/skip.py b/tests/common/skip.py
index afad729..4d9911a 100644
--- a/tests/common/skip.py
+++ b/tests/common/skip.py
@@ -25,7 +25,8 @@ import pytest
 from functools import partial
 
 from tests.common.environ import (ImpalaTestClusterProperties,
-    IS_DOCKERIZED_TEST_CLUSTER, IS_BUGGY_EL6_KERNEL, HIVE_MAJOR_VERSION)
+                                  IS_DOCKERIZED_TEST_CLUSTER, IS_BUGGY_EL6_KERNEL,
+                                  HIVE_MAJOR_VERSION, IS_REDHAT_6_DERIVATIVE)
 from tests.common.kudu_test_suite import get_kudu_master_flag
 from tests.util.filesystem_utils import (
     IS_ABFS,
@@ -274,3 +275,8 @@ class SkipIfCatalogV2:
     return pytest.mark.skipif(
       IMPALA_TEST_CLUSTER_PROPERTIES.is_catalog_v2_cluster(),
       reason="Table isn't invalidated with Local catalog and enabled hms_event_polling.")
+
+
+class SkipIfOS:
+  redhat6 = pytest.mark.skipif(IS_REDHAT_6_DERIVATIVE,
+                               reason="Flaky on redhat or centos 6")
diff --git a/tests/custom_cluster/test_admission_controller.py b/tests/custom_cluster/test_admission_controller.py
index f0fd4ac..11a2316 100644
--- a/tests/custom_cluster/test_admission_controller.py
+++ b/tests/custom_cluster/test_admission_controller.py
@@ -41,7 +41,8 @@ from tests.common.skip import (
     SkipIfABFS,
     SkipIfADLS,
     SkipIfEC,
-    SkipIfNotHdfsMinicluster)
+    SkipIfNotHdfsMinicluster,
+    SkipIfOS)
 from tests.common.test_dimensions import (
     create_single_exec_option_dimension,
     create_uncompressed_text_dimension)
@@ -1797,6 +1798,7 @@ class TestAdmissionControllerStress(TestAdmissionControllerBase):
         raise thread.error
 
   @pytest.mark.execute_serially
+  @SkipIfOS.redhat6
   @CustomClusterTestSuite.with_args(
       impalad_args=impalad_admission_ctrl_flags(max_requests=MAX_NUM_CONCURRENT_QUERIES,
         max_queued=MAX_NUM_QUEUED_QUERIES, pool_max_mem=-1, queue_wait_timeout_ms=600000),
@@ -1812,6 +1814,7 @@ class TestAdmissionControllerStress(TestAdmissionControllerBase):
       'mem_limit': sys.maxint})
 
   @pytest.mark.execute_serially
+  @SkipIfOS.redhat6
   @CustomClusterTestSuite.with_args(
     impalad_args=impalad_admission_ctrl_config_args(
       fs_allocation_file="fair-scheduler-test2.xml",
@@ -1833,6 +1836,7 @@ class TestAdmissionControllerStress(TestAdmissionControllerBase):
     return limit_metrics[0]
 
   @pytest.mark.execute_serially
+  @SkipIfOS.redhat6
   @CustomClusterTestSuite.with_args(
       impalad_args=impalad_admission_ctrl_flags(
         max_requests=MAX_NUM_CONCURRENT_QUERIES * 30, max_queued=MAX_NUM_QUEUED_QUERIES,


[impala] 02/04: IMPALA-8845: Cancel receiver's streams on exchange node's EOS

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 1c4bdcede475395d1139210a5d3ddf2641efa7eb
Author: Michael Ho <kw...@cloudera.com>
AuthorDate: Tue Aug 13 17:18:15 2019 -0700

    IMPALA-8845: Cancel receiver's streams on exchange node's EOS
    
    When an exchange node reaches its row count limit,
    the current code will not notify the sender fragments
    about it. Consequently, sender fragments may keep sending
    row batches to the exchange node but they won't be dequeued
    anymore. The sender fragments may end up blocking in the
    RPC indefinitely until the query is cancelled or closed.
    
    This change fixes the problem above by cancelling the
    underlying receiver's streams of an exchange node once it
    reaches the row count limit. This will unblock all senders
    whose TransmitData() RPCs haven't been replied to yet. Any
    future row batches sent to this receiver will also be immediately
    replied to with a response indicating that this receiver is
    already closed so the sender will stop sending any more row
    batches to it.
    
    Change-Id: I10c805e9d63ed8af9f458bf71e8ef5ea9376b939
    Reviewed-on: http://gerrit.cloudera.org:8080/14101
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/exec/exchange-node.cc              | 18 ++++++---
 be/src/exec/exchange-node.h               |  8 ++++
 be/src/runtime/krpc-data-stream-mgr.cc    |  3 +-
 be/src/runtime/krpc-data-stream-recvr.cc  | 12 ++++--
 be/src/runtime/krpc-data-stream-recvr.h   |  8 ++--
 tests/custom_cluster/test_exchange_eos.py | 67 +++++++++++++++++++++++++++++++
 6 files changed, 103 insertions(+), 13 deletions(-)

diff --git a/be/src/exec/exchange-node.cc b/be/src/exec/exchange-node.cc
index 961cfdf..3f96432 100644
--- a/be/src/exec/exchange-node.cc
+++ b/be/src/exec/exchange-node.cc
@@ -157,12 +157,17 @@ Status ExchangeNode::FillInputRowBatch(RuntimeState* state) {
   return ret_status;
 }
 
+void ExchangeNode::ReleaseRecvrResources(RowBatch* output_batch) {
+  stream_recvr_->TransferAllResources(output_batch);
+  stream_recvr_->CancelStream();
+}
+
 Status ExchangeNode::GetNext(RuntimeState* state, RowBatch* output_batch, bool* eos) {
   SCOPED_TIMER(runtime_profile_->total_time_counter());
   ScopedGetNextEventAdder ea(this, eos);
   RETURN_IF_ERROR(ExecDebugAction(TExecNodePhase::GETNEXT, state));
   if (ReachedLimit()) {
-    stream_recvr_->TransferAllResources(output_batch);
+    ReleaseRecvrResources(output_batch);
     *eos = true;
     return Status::OK();
   } else {
@@ -195,7 +200,7 @@ Status ExchangeNode::GetNext(RuntimeState* state, RowBatch* output_batch, bool*
       COUNTER_SET(rows_returned_counter_, rows_returned());
 
       if (ReachedLimit()) {
-        stream_recvr_->TransferAllResources(output_batch);
+        ReleaseRecvrResources(output_batch);
         *eos = true;
         return Status::OK();
       }
@@ -205,7 +210,9 @@ Status ExchangeNode::GetNext(RuntimeState* state, RowBatch* output_batch, bool*
     // we need more rows
     stream_recvr_->TransferAllResources(output_batch);
     RETURN_IF_ERROR(FillInputRowBatch(state));
-    *eos = (input_batch_ == NULL);
+    *eos = (input_batch_ == nullptr);
+    // No need to call CancelStream() on the receiver here as all incoming row batches
+    // have been consumed so we should have replied to all senders already.
     if (*eos) return Status::OK();
     next_row_idx_ = 0;
     DCHECK(input_batch_->row_desc()->LayoutIsPrefixOf(*output_batch->row_desc()));
@@ -238,8 +245,9 @@ Status ExchangeNode::GetNextMerging(RuntimeState* state, RowBatch* output_batch,
   CheckLimitAndTruncateRowBatchIfNeeded(output_batch, eos);
 
   // On eos, transfer all remaining resources from the input batches maintained
-  // by the merger to the output batch.
-  if (*eos) stream_recvr_->TransferAllResources(output_batch);
+  // by the merger to the output batch. Also cancel the underlying receiver so
+  // the senders' fragments can exit early.
+  if (*eos) ReleaseRecvrResources(output_batch);
 
   COUNTER_SET(rows_returned_counter_, rows_returned());
   return Status::OK();
diff --git a/be/src/exec/exchange-node.h b/be/src/exec/exchange-node.h
index e9f1d00..cbd65a9 100644
--- a/be/src/exec/exchange-node.h
+++ b/be/src/exec/exchange-node.h
@@ -71,6 +71,14 @@ class ExchangeNode : public ExecNode {
   /// Only used when is_merging_ is false.
   Status FillInputRowBatch(RuntimeState* state);
 
+  /// Releases resources of the receiver by transferring the resource ownership of
+  /// the most recently dequeued row batch to 'output_batch'. Also cancels the underlying
+  /// receiver so all senders will get unblocked. This function is called after the
+  /// exchange node hits end-of-stream due to reaching the node's row count limit.
+  /// Please note that no more rows will be returned from the receiver once this function
+  /// is called.
+  void ReleaseRecvrResources(RowBatch* output_batch);
+
   int num_senders_;  // needed for stream_recvr_ construction
 
   /// The underlying DataStreamRecvrBase instance. Ownership is shared between this
diff --git a/be/src/runtime/krpc-data-stream-mgr.cc b/be/src/runtime/krpc-data-stream-mgr.cc
index 521f52c..855d6d0 100644
--- a/be/src/runtime/krpc-data-stream-mgr.cc
+++ b/be/src/runtime/krpc-data-stream-mgr.cc
@@ -326,7 +326,8 @@ Status KrpcDataStreamMgr::DeregisterRecvr(
 }
 
 void KrpcDataStreamMgr::Cancel(const TUniqueId& finst_id) {
-  VLOG_QUERY << "cancelling all streams for fragment_instance_id=" << PrintId(finst_id);
+  VLOG_QUERY << "cancelling active streams for fragment_instance_id="
+             << PrintId(finst_id);
   lock_guard<mutex> l(lock_);
   FragmentRecvrSet::iterator iter =
       fragment_recvr_set_.lower_bound(make_pair(finst_id, 0));
diff --git a/be/src/runtime/krpc-data-stream-recvr.cc b/be/src/runtime/krpc-data-stream-recvr.cc
index 7c2dbe1..2187c85 100644
--- a/be/src/runtime/krpc-data-stream-recvr.cc
+++ b/be/src/runtime/krpc-data-stream-recvr.cc
@@ -453,7 +453,9 @@ void KrpcDataStreamRecvr::SenderQueue::AddBatch(const TransmitDataRequestPB* req
     // responded to if we reach here.
     DCHECK_GT(num_remaining_senders_, 0);
     if (UNLIKELY(is_cancelled_)) {
-      DataStreamService::RespondRpc(Status::OK(), response, rpc_context);
+      Status cancel_status = Status::Expected(TErrorCode::DATASTREAM_RECVR_CLOSED,
+          PrintId(recvr_->fragment_instance_id()), recvr_->dest_node_id());
+      DataStreamService::RespondRpc(cancel_status, response, rpc_context);
       return;
     }
 
@@ -557,7 +559,9 @@ void KrpcDataStreamRecvr::SenderQueue::TakeOverEarlySender(
   {
     unique_lock<SpinLock> l(lock_);
     if (UNLIKELY(is_cancelled_)) {
-      DataStreamService::RespondRpc(Status::OK(), ctx->response, ctx->rpc_context);
+      Status cancel_status = Status::Expected(TErrorCode::DATASTREAM_RECVR_CLOSED,
+          PrintId(recvr_->fragment_instance_id()), recvr_->dest_node_id());
+      DataStreamService::RespondRpc(cancel_status, ctx->response, ctx->rpc_context);
       return;
     }
     // Only enqueue a deferred RPC if the sender queue is not yet cancelled.
@@ -589,7 +593,9 @@ void KrpcDataStreamRecvr::SenderQueue::Cancel() {
     // Respond to deferred RPCs.
     while (!deferred_rpcs_.empty()) {
       const unique_ptr<TransmitDataCtx>& ctx = deferred_rpcs_.front();
-      DataStreamService::RespondAndReleaseRpc(Status::OK(), ctx->response,
+      Status cancel_status = Status::Expected(TErrorCode::DATASTREAM_RECVR_CLOSED,
+          PrintId(recvr_->fragment_instance_id()), recvr_->dest_node_id());
+      DataStreamService::RespondAndReleaseRpc(cancel_status, ctx->response,
           ctx->rpc_context, recvr_->deferred_rpc_tracker());
       DequeueDeferredRpc(l);
     }
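
The control flow in SenderQueue::Cancel() above boils down to: set a cancelled flag under the lock, fail every parked RPC with DATASTREAM_RECVR_CLOSED, and answer any later arrival the same way. A minimal Python sketch of that pattern, assuming a callback-style respond function; the names model the C++ code and are not Impala APIs:

    import threading

    class SenderQueue(object):
        def __init__(self):
            self._lock = threading.Lock()
            self._cancelled = False
            self._deferred = []  # parked senders awaiting replies (deferred_rpcs_)

        def add_batch(self, respond):
            with self._lock:
                if self._cancelled:
                    # Receiver already closed: reply immediately so the sender
                    # stops transmitting and can run to completion.
                    respond('DATASTREAM_RECVR_CLOSED')
                    return
                self._deferred.append(respond)  # park until consumed or cancelled

        def cancel(self):
            with self._lock:
                self._cancelled = True
                # Unblock every parked sender with the same terminal status.
                while self._deferred:
                    self._deferred.pop(0)('DATASTREAM_RECVR_CLOSED')

    def log_reply(status):
        print('sender got: %s' % status)

    q = SenderQueue()
    q.add_batch(log_reply)  # parked: no reply yet
    q.cancel()              # the parked sender is replied to here
    q.add_batch(log_reply)  # a cancelled queue replies immediately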
diff --git a/be/src/runtime/krpc-data-stream-recvr.h b/be/src/runtime/krpc-data-stream-recvr.h
index 063ebe1..f9fcc22 100644
--- a/be/src/runtime/krpc-data-stream-recvr.h
+++ b/be/src/runtime/krpc-data-stream-recvr.h
@@ -115,6 +115,10 @@ class KrpcDataStreamRecvr {
   /// queue to the specified batch. Called from fragment instance execution threads only.
   void TransferAllResources(RowBatch* transfer_batch);
 
+  /// Marks all sender queues as cancelled and notifies all waiting consumers of
+  /// the cancellation.
+  void CancelStream();
+
   const TUniqueId& fragment_instance_id() const { return fragment_instance_id_; }
   PlanNodeId dest_node_id() const { return dest_node_id_; }
   const RowDescriptor* row_desc() const { return row_desc_; }
@@ -156,10 +160,6 @@ class KrpcDataStreamRecvr {
   /// sender queue. Called from KrpcDataStreamMgr.
   void RemoveSender(int sender_id);
 
-  /// Marks all sender queues as cancelled and notifies all waiting consumers of
-  /// cancellation.
-  void CancelStream();
-
   /// Return true if the addition of a new batch of size 'batch_size' would exceed the
   /// total buffer limit.
   bool ExceedsLimit(int64_t batch_size) {
diff --git a/tests/custom_cluster/test_exchange_eos.py b/tests/custom_cluster/test_exchange_eos.py
new file mode 100644
index 0000000..b26739f
--- /dev/null
+++ b/tests/custom_cluster/test_exchange_eos.py
@@ -0,0 +1,67 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import pytest
+
+from tests.common.custom_cluster_test_suite import CustomClusterTestSuite
+from tests.common.impala_cluster import ImpalaCluster
+from tests.verifiers.metric_verifier import MetricVerifier
+
+
+class TestExchangeEos(CustomClusterTestSuite):
+  """ Test to verify that the senders' fragments get unblocked and run to completion
+  after the exchange node hits eos."""
+
+  @classmethod
+  def get_workload(cls):
+    return 'tpch'
+
+  @classmethod
+  def add_test_dimensions(cls):
+    super(CustomClusterTestSuite, cls).add_test_dimensions()
+    cls.ImpalaTestMatrix.add_constraint(lambda v:
+        v.get_value('table_format').file_format == 'parquet')
+
+  @pytest.mark.execute_serially
+  @CustomClusterTestSuite.with_args(cluster_size=9, num_exclusive_coordinators=1)
+  def test_exchange_eos(self, vector):
+    """ Test IMPALA-8845: runs with result spooling enabled and defers the fetching
+    of results until all non-coordinator fragments have completed. It aims to verify
+    that once the coordinator fragment reaches eos, the rest of the fragments will
+    get unblocked. Using a cluster of size 9 which can reliably reproduce the hang of
+    some non-coordinator fragments without the fix of IMPALA-8845.
+    """
+
+    cluster = ImpalaCluster.get_e2e_test_cluster()
+    coordinator = cluster.get_first_impalad()
+    client = coordinator.service.create_beeswax_client()
+
+    vector.get_value('exec_option')['spool_query_results'] = 'true'
+    for query in ["select * from tpch.lineitem order by l_orderkey limit 10000",
+                  "select * from tpch.lineitem limit 10000"]:
+      handle = self.execute_query_async_using_client(client, query, vector)
+      for impalad in ImpalaCluster.get_e2e_test_cluster().impalads:
+        verifier = MetricVerifier(impalad.service)
+        if impalad.get_webserver_port() == coordinator.get_webserver_port():
+          num_fragments = 1
+        else:
+          num_fragments = 0
+        verifier.wait_for_metric("impala-server.num-fragments-in-flight", num_fragments)
+      results = client.fetch(query, handle)
+      assert results.success
+      assert len(results.data) == 10000
+    client.close()
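
The test above leans on MetricVerifier.wait_for_metric() to block until each impalad's in-flight fragment count settles at the expected value. A generic sketch of that poll-until-value pattern, where get_metric stands in for the real web-service call:

    import time

    def wait_for_metric(get_metric, expected, timeout_s=60, interval_s=0.5):
        # Poll get_metric() until it returns 'expected' or the timeout expires.
        deadline = time.time() + timeout_s
        while time.time() < deadline:
            if get_metric() == expected:
                return
            time.sleep(interval_s)
        raise AssertionError('metric never reached %s' % expected)

    # Usage sketch: a fake counter that drains to zero after a few polls.
    state = {'in_flight': 3}

    def fake_metric():
        state['in_flight'] = max(0, state['in_flight'] - 1)
        return state['in_flight']

    wait_for_metric(fake_metric, 0)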


[impala] 01/04: Remove redundant table name population in Kudu integration

Posted by bo...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

boroknagyz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 141066684ddb7542575280a6b8acde06ff201dfe
Author: Hao Hao <ha...@cloudera.com>
AuthorDate: Fri Aug 23 13:50:16 2019 -0700

    Remove redundant table name population in Kudu integration
    
    This patch removes the hack that populated the table name when it was
    empty in the Kudu integration. With the current Kudu version, the
    storage handler supports the 'kudu.table_name' property, so the table
    name should no longer be empty.
    
    Change-Id: Iaa88ae5f0597ef203b60adcc972d06f8f4a418b7
    Reviewed-on: http://gerrit.cloudera.org:8080/14130
    Reviewed-by: Thomas Tauber-Marshall <tm...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../java/org/apache/impala/catalog/KuduTable.java  | 25 ----------------------
 1 file changed, 25 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 338c979..3db60ee 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -117,13 +117,6 @@ public class KuduTable extends Table implements FeKuduTable {
     super(msTable, db, name, owner);
     kuduTableName_ = msTable.getParameters().get(KuduTable.KEY_TABLE_NAME);
     kuduMasters_ = msTable.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
-    if (kuduTableName_ == null || kuduTableName_.isEmpty()) {
-      // When 'kudu.table_name' property is empty, it implies Kudu/HMS
-      // integration is enabled.
-      // TODO: remove this hack once Kudu support 'kudu.table_name'
-      // property with the new storage handler.
-      populateDefaultTableName(msTable, /* isHMSIntegrationEnabled */true);
-    }
   }
 
   @Override
@@ -166,17 +159,6 @@ public class KuduTable extends Table implements FeKuduTable {
   }
 
   /**
-   * Populates the default table name.
-   */
-  private void populateDefaultTableName(
-      org.apache.hadoop.hive.metastore.api.Table msTbl,
-      boolean isHMSIntegrationEnabled) {
-    kuduTableName_ = KuduUtil.getDefaultKuduTableName(
-        msTbl.getDbName(), msTbl.getTableName(), isHMSIntegrationEnabled);
-    msTbl.getParameters().put(KuduTable.KEY_TABLE_NAME, kuduTableName_);
-  }
-
-  /**
    * Get the Hive Metastore configuration from Kudu masters.
    */
   private static HiveMetastoreConfig getHiveMetastoreConfig(String kuduMasters)
@@ -297,13 +279,6 @@ public class KuduTable extends Table implements FeKuduTable {
       // Copy the table to check later if anything has changed.
       msTable_ = msTbl.deepCopy();
       kuduTableName_ = msTable_.getParameters().get(KuduTable.KEY_TABLE_NAME);
-      if (kuduTableName_ == null || kuduTableName_.isEmpty()) {
-        // When 'kudu.table_name' property is empty, it implies Kudu/HMS
-        // integration is enabled.
-        // TODO: remove this hack once Kudu support 'kudu.table_name'
-        // property with the new storage handler.
-        populateDefaultTableName(msTable_, /* isHMSIntegrationEnabled */true);
-      }
       kuduMasters_ = msTable_.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
       if (kuduMasters_ == null || kuduMasters_.isEmpty()) {
         throw new TableLoadingException("No " + KuduTable.KEY_MASTER_HOSTS +