Posted to commits@impala.apache.org by wz...@apache.org on 2023/09/06 04:35:35 UTC

[impala] branch master updated: IMPALA-12416: Fix test failures caused by IMPALA-11535

This is an automated email from the ASF dual-hosted git repository.

wzhou pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 188c2d637 IMPALA-12416: Fix test failures caused by IMPALA-11535
188c2d637 is described below

commit 188c2d6379d0dc3f6fddcd307dcfa875fe754353
Author: Sai Hemanth Gantasala <sa...@cloudera.com>
AuthorDate: Tue Sep 5 15:57:47 2023 -0700

    IMPALA-12416: Fix test failures caused by
    IMPALA-11535
    
    Fixed the Java unit test failures caused by setting the config
    'enable_sync_to_latest_event_on_ddls' to true without restoring it.
    The flag has to be reset to its original value at the end of the test
    because BackendConfig.INSTANCE is shared by all FE tests. Also
    increased the HMS polling interval to 10 seconds for the
    test_skipping_older_events() end-to-end test to avoid flakiness.
    
    Change-Id: I4930933dca849496bfbe475c8efc960d15fa57a8
    Reviewed-on: http://gerrit.cloudera.org:8080/20454
    Reviewed-by: Quanlong Huang <hu...@gmail.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../events/MetastoreEventsProcessorTest.java       | 78 ++++++++++++----------
 tests/custom_cluster/test_events_custom_configs.py | 12 ++--
 2 files changed, 49 insertions(+), 41 deletions(-)
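
The Java change follows the usual save-and-restore pattern for shared static configuration: BackendConfig.INSTANCE is a process-wide singleton shared by every frontend test, so any flag a test overrides must be put back in a finally block even if an assertion fails partway through. A minimal sketch of that pattern is below; the getter and setter calls are the same ones used in the diff, while the test method name and body are placeholders for illustration only.

    // Sketch of the save-and-restore pattern applied by this patch.
    // BackendConfig.INSTANCE is shared across all FE tests, so flags
    // flipped here must be restored before the test returns.
    @Test
    public void testWithTemporaryFlagOverride() throws Exception {
      boolean prevSyncFlag = BackendConfig.INSTANCE.enableSyncToLatestEventOnDdls();
      boolean prevInvalidateFlag =
          BackendConfig.INSTANCE.invalidateCatalogdHMSCacheOnDDLs();
      try {
        BackendConfig.INSTANCE.setEnableSyncToLatestEventOnDdls(true);
        BackendConfig.INSTANCE.setInvalidateCatalogdHMSCacheOnDDLs(false);
        // ... exercise the behavior that depends on the overridden flags ...
      } finally {
        // Restore the original values so later tests see the defaults.
        BackendConfig.INSTANCE.setEnableSyncToLatestEventOnDdls(prevSyncFlag);
        BackendConfig.INSTANCE.setInvalidateCatalogdHMSCacheOnDDLs(prevInvalidateFlag);
      }
    }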

diff --git a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
index bcf75c16d..bfd44f86c 100644
--- a/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/events/MetastoreEventsProcessorTest.java
@@ -3214,41 +3214,49 @@ public class MetastoreEventsProcessorTest {
    */
   @Test
   public void testSkippingOlderEvents() throws Exception {
-    BackendConfig.INSTANCE.setEnableSyncToLatestEventOnDdls(true);
-    BackendConfig.INSTANCE.setSkippingOlderEvents(true);
-    createDatabase(TEST_DB_NAME, null);
-    final String testTblName = "testSkippingOlderEvents";
-    createTable(testTblName, true);
-    eventsProcessor_.processEvents();
-    AlterTableExecutor hiveExecutor = new HiveAlterTableExecutor(TEST_DB_NAME,
-        testTblName);
-    hiveExecutor.execute();
-    HdfsTable testTbl = (HdfsTable)catalog_.getOrLoadTable(TEST_DB_NAME, testTblName,
-        "test", null);
-    long lastSyncEventIdBefore = testTbl.getLastRefreshEventId();
-    alterTableAddParameter(testTblName, "somekey", "someval");
-    eventsProcessor_.processEvents();
-    assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
-    assertTrue(testTbl.getLastRefreshEventId() > lastSyncEventIdBefore);
-    confirmTableIsLoaded(TEST_DB_NAME, testTblName);
-    final String testUnpartTblName = "testUnPartSkippingOlderEvents";
-    createTable(testUnpartTblName, false);
-    testInsertEvents(TEST_DB_NAME, testUnpartTblName, false);
-    testTbl = (HdfsTable)catalog_.getOrLoadTable(TEST_DB_NAME, testUnpartTblName,
-        "test", null);
-    eventsProcessor_.processEvents();
-    assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
-    confirmTableIsLoaded(TEST_DB_NAME, testUnpartTblName);
-    // Verify older HMS events are skipped by doing refresh in Impala
-    alterTableAddCol(testUnpartTblName, "newCol", "string", "test new column");
-    testTbl = (HdfsTable)catalog_.getOrLoadTable(TEST_DB_NAME, testUnpartTblName,
-        "test", null);
-    lastSyncEventIdBefore = testTbl.getLastRefreshEventId();
-    catalog_.reloadTable(testTbl, "test");
-    eventsProcessor_.processEvents();
-    assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
-    assertTrue(testTbl.getLastRefreshEventId() > lastSyncEventIdBefore);
-    confirmTableIsLoaded(TEST_DB_NAME, testTblName);
+    boolean prevFlagVal = BackendConfig.INSTANCE.enableSyncToLatestEventOnDdls();
+    boolean invalidateHMSFlag = BackendConfig.INSTANCE.invalidateCatalogdHMSCacheOnDDLs();
+    try {
+      BackendConfig.INSTANCE.setEnableSyncToLatestEventOnDdls(true);
+      BackendConfig.INSTANCE.setInvalidateCatalogdHMSCacheOnDDLs(false);
+      BackendConfig.INSTANCE.setSkippingOlderEvents(true);
+      createDatabase(TEST_DB_NAME, null);
+      final String testTblName = "testSkippingOlderEvents";
+      createTable(testTblName, true);
+      eventsProcessor_.processEvents();
+      AlterTableExecutor hiveExecutor = new HiveAlterTableExecutor(TEST_DB_NAME,
+          testTblName);
+      hiveExecutor.execute();
+      HdfsTable testTbl = (HdfsTable) catalog_.getOrLoadTable(TEST_DB_NAME, testTblName,
+          "test", null);
+      long lastSyncEventIdBefore = testTbl.getLastRefreshEventId();
+      alterTableAddParameter(testTblName, "somekey", "someval");
+      eventsProcessor_.processEvents();
+      assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
+      assertTrue(testTbl.getLastRefreshEventId() > lastSyncEventIdBefore);
+      confirmTableIsLoaded(TEST_DB_NAME, testTblName);
+      final String testUnpartTblName = "testUnPartSkippingOlderEvents";
+      createTable(testUnpartTblName, false);
+      testInsertEvents(TEST_DB_NAME, testUnpartTblName, false);
+      testTbl = (HdfsTable) catalog_.getOrLoadTable(TEST_DB_NAME, testUnpartTblName,
+          "test", null);
+      eventsProcessor_.processEvents();
+      assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
+      confirmTableIsLoaded(TEST_DB_NAME, testUnpartTblName);
+      // Verify older HMS events are skipped by doing refresh in Impala
+      alterTableAddCol(testUnpartTblName, "newCol", "string", "test new column");
+      testTbl = (HdfsTable) catalog_.getOrLoadTable(TEST_DB_NAME, testUnpartTblName,
+          "test", null);
+      lastSyncEventIdBefore = testTbl.getLastRefreshEventId();
+      catalog_.reloadTable(testTbl, "test");
+      eventsProcessor_.processEvents();
+      assertEquals(testTbl.getLastRefreshEventId(), eventsProcessor_.getCurrentEventId());
+      assertTrue(testTbl.getLastRefreshEventId() > lastSyncEventIdBefore);
+      confirmTableIsLoaded(TEST_DB_NAME, testTblName);
+    } finally {
+      BackendConfig.INSTANCE.setEnableSyncToLatestEventOnDdls(prevFlagVal);
+      BackendConfig.INSTANCE.setInvalidateCatalogdHMSCacheOnDDLs(invalidateHMSFlag);
+    }
   }
 
   private void createDatabase(String catName, String dbName,
diff --git a/tests/custom_cluster/test_events_custom_configs.py b/tests/custom_cluster/test_events_custom_configs.py
index e37c45ae6..5adb1ea1a 100644
--- a/tests/custom_cluster/test_events_custom_configs.py
+++ b/tests/custom_cluster/test_events_custom_configs.py
@@ -312,7 +312,7 @@ class TestEventProcessingCustomConfigs(CustomClusterTestSuite):
     assert EventProcessorUtils.get_event_processor_status() == "ACTIVE"
 
   @CustomClusterTestSuite.with_args(
-    catalogd_args="--hms_event_polling_interval_s=5"
+    catalogd_args="--hms_event_polling_interval_s=10"
                   " --enable_skipping_older_events=true"
                   " --enable_sync_to_latest_event_on_ddls=true")
   def test_skipping_older_events(self, unique_database):
@@ -360,11 +360,11 @@ class TestEventProcessingCustomConfigs(CustomClusterTestSuite):
         complete_query += query.format(unique_database, table_name)
       verify_skipping_hive_stmt_events(complete_query, table_name)
       # Dynamic partitions test
-      query = " ".join(["create", "transactional" if is_transactional else '',
-        "table `{}`.`{}` (i int)",
-        "partitioned by (year int)" if is_partitioned else '', ";"])
-      complete_query = query.format(unique_database, "new_table")
-      complete_query += "insert overwrite table `{db}`.`{tbl1}` " \
+      query = " ".join(["create", "table `{}`.`{}` (i int)",
+        " partitioned by (year int) " if is_partitioned else '',
+        self.__get_transactional_tblproperties(is_transactional)])
+      self.client.execute(query.format(unique_database, "new_table"))
+      complete_query = "insert overwrite table `{db}`.`{tbl1}` " \
         "select * from `{db}`.`{tbl2}`"\
         .format(db=unique_database, tbl1="new_table", tbl2=table_name)
       verify_skipping_hive_stmt_events(complete_query, "new_table")