Posted to commits@impala.apache.org by st...@apache.org on 2023/02/22 07:39:00 UTC

[impala] branch master updated (ff7b5db60 -> 88a7538af)

This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


    from ff7b5db60 IMPALA-11081: Fix incorrect results in partition key scan
     new 47e67afbc IMPALA-11932: skip test_partition_key_scans_with_multiple_blocks_table under erasure coding
     new 89cc20717 IMPALA-4052: CREATE TABLE LIKE for Kudu tables
     new 315349054 IMPALA-11802: Optimize count(*) queries for Iceberg V2 position delete tables
     new 88a7538af IMPALA-11869: Use to_string() in PrintValue for printing Thrift types

The 4 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 be/src/util/debug-util.h                           |   7 +-
 common/thrift/Query.thrift                         |   3 +
 .../java/org/apache/impala/analysis/Analyzer.java  |  26 +++--
 .../impala/analysis/CreateTableLikeStmt.java       |  38 +++++--
 .../main/java/org/apache/impala/analysis/Expr.java |   5 +
 .../org/apache/impala/analysis/SelectStmt.java     |  53 ++++++----
 .../org/apache/impala/catalog/FeIcebergTable.java  |  49 +++++++--
 .../apache/impala/planner/IcebergScanPlanner.java  |  15 ++-
 .../impala/rewrite/CountStarToConstRule.java       |  65 ++++++++++--
 .../apache/impala/service/CatalogOpExecutor.java   |  54 +++++++++-
 .../main/java/org/apache/impala/util/KuduUtil.java |   5 +
 .../org/apache/impala/analysis/AnalyzeDDLTest.java |  22 +++-
 .../functional/functional_schema_template.sql      |   2 +-
 .../queries/PlannerTest/iceberg-v2-tables.test     | 104 +++++++++++++++++++
 .../iceberg-plain-count-star-optimization.test     |   2 +-
 .../iceberg-v2-plain-count-star-optimization.test  |  47 +++++++++
 .../iceberg-v2-read-position-deletes-orc.test      |  63 ++++++++++-
 .../iceberg-v2-read-position-deletes.test          |  98 +++++++++++++++++-
 .../QueryTest/kudu_create_table_like_table.test    | 115 +++++++++++++++++++++
 tests/custom_cluster/test_kudu.py                  |   6 ++
 tests/query_test/test_iceberg.py                   |   6 ++
 tests/query_test/test_kudu.py                      |   7 ++
 tests/query_test/test_queries.py                   |  10 +-
 23 files changed, 726 insertions(+), 76 deletions(-)
 create mode 100644 testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-plain-count-star-optimization.test
 create mode 100644 testdata/workloads/functional-query/queries/QueryTest/kudu_create_table_like_table.test


[impala] 04/04: IMPALA-11869: Use to_string() in PrintValue for printing Thrift types

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 88a7538af79afc19233cdfe3220e3c25e7edd16d
Author: Daniel Becker <da...@cloudera.com>
AuthorDate: Thu Feb 9 14:25:31 2023 +0100

    IMPALA-11869: Use to_string() in PrintValue for printing Thrift types
    
    IMPALA-11645 introduced the function PrintValue(), which we use to
    convert Thrift types to strings. This function used operator<<,
    which is provided for the generated Thrift types. However, Thrift also
    generates a to_string() function that is overloaded for Thrift types. It
    would be more efficient to use this instead of involving streams with
    operator<<.
    
    This change reimplements PrintValue() using to_string() instead of
    operator<<.
    
    Change-Id: Ibc5b847dea2bdea7ba0ab8e093a8bab5a8145019
    Reviewed-on: http://gerrit.cloudera.org:8080/19487
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 be/src/util/debug-util.h | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/be/src/util/debug-util.h b/be/src/util/debug-util.h
index 51b73d979..66fa8e7ff 100644
--- a/be/src/util/debug-util.h
+++ b/be/src/util/debug-util.h
@@ -51,12 +51,11 @@ class TupleRow;
 // Forward declaration to avoid including descriptors.h.
 typedef std::vector<int> SchemaPath;
 
-// Converts a value for which operator<< is defined to a std::string.
+// Used to convert Thrift objects to strings. Thrift defines a 'to_string()' function for
+// each type.
 template<class T>
 std::string PrintValue(const T& value) {
-  std::stringstream s;
-  s << value;
-  return s.str();
+  return to_string(value);
 }
 
 std::string PrintTuple(const Tuple* t, const TupleDescriptor& d);


[impala] 01/04: IMPALA-11932: skip test_partition_key_scans_with_multiple_blocks_table under erasure coding

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 47e67afbcaa14d9b72f5bcb48555cce5eb2fb3a5
Author: zhangyifan27 <ch...@163.com>
AuthorDate: Mon Feb 20 10:35:51 2023 +0800

    IMPALA-11932: skip test_partition_key_scans_with_multiple_blocks_table under erasure coding
    
    The erasure coding policy used in our tests is RS-3-2-1024k, which
    requires the block size to be at least 1MB. The test introduced in
    IMPALA-11081 uses 'dfs.block.size=1024' to create a table with multiple
    blocks, so we should skip this test under erasure coding to avoid test
    failures.
    
    Change-Id: I0f088102c380df89f56870d901852f7dde2d72fe
    Reviewed-on: http://gerrit.cloudera.org:8080/19515
    Reviewed-by: Quanlong Huang <hu...@gmail.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 tests/query_test/test_queries.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/tests/query_test/test_queries.py b/tests/query_test/test_queries.py
index 5d747fcda..dbde99a2f 100644
--- a/tests/query_test/test_queries.py
+++ b/tests/query_test/test_queries.py
@@ -22,11 +22,12 @@ import re
 from copy import deepcopy
 
 from tests.common.impala_test_suite import ImpalaTestSuite
-from tests.common.skip import SkipIfEC, SkipIfCatalogV2, SkipIfNotHdfsMinicluster
+from tests.common.skip import (
+    SkipIfEC, SkipIfCatalogV2, SkipIfNotHdfsMinicluster, SkipIfFS)
 from tests.common.test_dimensions import (
-   create_uncompressed_text_dimension, create_exec_option_dimension_from_dict,
-   create_client_protocol_dimension, hs2_parquet_constraint,
-   extend_exec_option_dimension, FILE_FORMAT_TO_STORED_AS_MAP)
+    create_uncompressed_text_dimension, create_exec_option_dimension_from_dict,
+    create_client_protocol_dimension, hs2_parquet_constraint,
+    extend_exec_option_dimension, FILE_FORMAT_TO_STORED_AS_MAP)
 from tests.util.filesystem_utils import get_fs_path
 from subprocess import check_call
 
@@ -363,6 +364,7 @@ class TestPartitionKeyScansWithMultipleBlocks(ImpalaTestSuite):
     self.client.execute("alter table %s.alltypes_multiblocks recover partitions"
         % (unique_database))
 
+  @SkipIfFS.hdfs_small_block
   def test_partition_key_scans_with_multiple_blocks_table(self, vector, unique_database):
     self._build_alltypes_multiblocks_table(vector, unique_database)
     result = self.execute_query_expect_success(self.client,


[impala] 03/04: IMPALA-11802: Optimize count(*) queries for Iceberg V2 position delete tables

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 3153490545d1b3730ba17bc020909f2ae9c18d94
Author: LPL <li...@apache.org>
AuthorDate: Fri Feb 10 11:57:40 2023 +0800

    IMPALA-11802: Optimize count(*) queries for Iceberg V2 position delete tables
    
    The SCAN plan of a count star query for Iceberg V2 position delete
    tables is as follows:
    
        AGGREGATE
        COUNT(*)
            |
        UNION ALL
       /         \
      /           \
     /             \
    SCAN all    ANTI JOIN
    datafiles  /         \
    without   /           \
    deletes  SCAN         SCAN
             datafiles    deletes
    
    Since Iceberg provides the number of records in a file (record_count), we
    can use this to optimize a simple count star query for Iceberg V2
    position delete tables. First, the number of records in all DataFiles
    without corresponding DeleteFiles is calculated from the Iceberg metadata
    files. The query is then rewritten as follows:
    
          ArithmeticExpr(ADD)
          /             \
         /               \
        /                 \
    record_count       AGGREGATE
    of all             COUNT(*)
    datafiles              |
    without            ANTI JOIN
    deletes           /         \
                     /           \
                    SCAN        SCAN
                    datafiles   deletes
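
    A minimal, hypothetical Java sketch of this idea (the DataFile record and
    method names below are illustrative, not Impala's actual classes): rows of
    data files that have no delete files are taken straight from the Iceberg
    record_count metadata, and only the remaining files go through the ANTI
    JOIN, whose count(*) result is added on top.

        import java.util.List;

        public class CountStarV2Sketch {
          // Illustrative stand-in for an Iceberg data file entry.
          record DataFile(long recordCount, boolean hasDeleteFiles) {}

          // Sum record_count of data files without deletes (metadata only) and
          // add the count(*) computed by the ANTI JOIN over the remaining files.
          static long optimizedCountStar(List<DataFile> dataFiles, long antiJoinCount) {
            long fromMetadata = dataFiles.stream()
                .filter(f -> !f.hasDeleteFiles())
                .mapToLong(DataFile::recordCount)
                .sum();
            return fromMetadata + antiJoinCount;
          }

          public static void main(String[] args) {
            List<DataFile> files = List.of(
                new DataFile(4, false),  // no deletes: counted via metadata
                new DataFile(3, true));  // has deletes: scanned and anti-joined
            System.out.println(optimizedCountStar(files, 2));  // prints 6
          }
        }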
    
    Testing:
     * Existing tests
     * Added e2e tests
    
    Change-Id: I8172c805121bf91d23fe063f806493afe2f03d41
    Reviewed-on: http://gerrit.cloudera.org:8080/19494
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
    Reviewed-by: Zoltan Borok-Nagy <bo...@cloudera.com>
---
 common/thrift/Query.thrift                         |   3 +
 .../java/org/apache/impala/analysis/Analyzer.java  |  26 ++++--
 .../main/java/org/apache/impala/analysis/Expr.java |   5 +
 .../org/apache/impala/analysis/SelectStmt.java     |  53 +++++++----
 .../org/apache/impala/catalog/FeIcebergTable.java  |  49 +++++++---
 .../apache/impala/planner/IcebergScanPlanner.java  |  15 ++-
 .../impala/rewrite/CountStarToConstRule.java       |  65 +++++++++++--
 .../queries/PlannerTest/iceberg-v2-tables.test     | 104 +++++++++++++++++++++
 .../iceberg-plain-count-star-optimization.test     |   2 +-
 .../iceberg-v2-plain-count-star-optimization.test  |  47 ++++++++++
 .../iceberg-v2-read-position-deletes-orc.test      |  63 ++++++++++++-
 .../iceberg-v2-read-position-deletes.test          |  98 ++++++++++++++++++-
 tests/query_test/test_iceberg.py                   |   6 ++
 13 files changed, 485 insertions(+), 51 deletions(-)

diff --git a/common/thrift/Query.thrift b/common/thrift/Query.thrift
index 2c433b809..49d34b40d 100644
--- a/common/thrift/Query.thrift
+++ b/common/thrift/Query.thrift
@@ -792,6 +792,9 @@ struct TQueryCtx {
 
   // True if the query is transactional for Kudu table.
   29: required bool is_kudu_transactional = false
+
+  // True if the query can be optimized for Iceberg V2 table.
+  30: required bool optimize_count_star_for_iceberg_v2 = false
 }
 
 
diff --git a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
index c2e9c174a..2acdea7a9 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Analyzer.java
@@ -231,8 +231,12 @@ public class Analyzer {
   // if an exception was encountered.
   private String mvAuthExceptionMsg_ = null;
 
-  // Total records num of the Iceberg table.
-  private long totalRecordsNum_;
+  // Total records num V1 is calculated by all DataFiles of the Iceberg V1 table.
+  private long totalRecordsNumV1_;
+
+  // Total records num V2 is calculated by all DataFiles without corresponding DeleteFiles
+  // to be applied of the Iceberg V2 table.
+  private long totalRecordsNumV2_;
 
   // Required Operation type: Read, write, any(read or write).
   public enum OperationType {
@@ -992,18 +996,28 @@ public class Analyzer {
     return mvAuthExceptionMsg_;
   }
 
-  public void setTotalRecordsNum(long totalRecordsNum) {
-    totalRecordsNum_ = totalRecordsNum;
+  public void setTotalRecordsNumV1(long totalRecordsNumV1) {
+    totalRecordsNumV1_ = totalRecordsNumV1;
+  }
+
+  public long getTotalRecordsNumV1() { return totalRecordsNumV1_; }
+
+  public void setTotalRecordsNumV2(long totalRecordsNumV2) {
+    totalRecordsNumV2_ = totalRecordsNumV2;
   }
 
-  public long getTotalRecordsNum() { return totalRecordsNum_; }
+  public long getTotalRecordsNumV2() {
+    return totalRecordsNumV2_;
+  }
 
   /**
    * Check if 'count(*)' FunctionCallExpr can be rewritten as LiteralExpr. When
    * totalRecordsNum_ is 0, no optimization 'count(*)' is still very fast, so return true
    * only if totalRecordsNum_ is greater than 0.
    */
-  public boolean canRewriteCountStarToConst() { return totalRecordsNum_ > 0; }
+  public boolean canRewriteCountStarForV1() { return totalRecordsNumV1_ > 0; }
+
+  public boolean canRewriteCountStartForV2() { return totalRecordsNumV2_ > 0; }
 
   /**
    * Register conjuncts that are outer joined by a full outer join. For a given
diff --git a/fe/src/main/java/org/apache/impala/analysis/Expr.java b/fe/src/main/java/org/apache/impala/analysis/Expr.java
index 44c478efb..5b2ed1463 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Expr.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Expr.java
@@ -418,6 +418,8 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   // True after analysis successfully completed. Protected by accessors isAnalyzed() and
   // analysisDone().
   private boolean isAnalyzed_ = false;
+  private boolean isRewritten_ = false;
+
 
   // True if this has already been counted towards the number of statement expressions
   private boolean isCountedForNumStmtExprs_ = false;
@@ -443,6 +445,7 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
     isAuxExpr_ = other.isAuxExpr_;
     type_ = other.type_;
     isAnalyzed_ = other.isAnalyzed_;
+    isRewritten_ = other.isRewritten_;
     isOnClauseConjunct_ = other.isOnClauseConjunct_;
     printSqlInParens_ = other.printSqlInParens_;
     selectivity_ = other.selectivity_;
@@ -460,6 +463,8 @@ abstract public class Expr extends TreeNode<Expr> implements ParseNode, Cloneabl
   }
 
   public boolean isAnalyzed() { return isAnalyzed_; }
+  public boolean isRewritten() { return isRewritten_; }
+  public void setRewritten(boolean isRewritten) { isRewritten_ = isRewritten; }
   public ExprId getId() { return id_; }
   protected void setId(ExprId id) { id_ = id; }
   public Type getType() { return type_; }
diff --git a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
index e420462be..b3de08ac7 100644
--- a/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/SelectStmt.java
@@ -30,6 +30,7 @@ import java.util.Set;
 import java.util.stream.Stream;
 import java.util.stream.Collectors;
 
+import org.apache.iceberg.Table;
 import org.apache.impala.analysis.Path.PathType;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.ArrayType;
@@ -1424,7 +1425,7 @@ public class SelectStmt extends QueryStmt {
 
 
   /**
-   * Set totalRecordsNum_ in analyzer_ for the plain count(*) queries of Iceberg tables.
+   * Set totalRecordsNumVx_ in analyzer_ for the plain count(*) queries of Iceberg tables.
    * Queries that can be rewritten need to meet the following requirements:
    *  - stmt does not have WHERE clause
    *  - stmt does not have GROUP BY clause
@@ -1433,9 +1434,8 @@ public class SelectStmt extends QueryStmt {
    *  - tableRef doesn't have sampling param
    *  - table is the Iceberg table
    *  - SelectList must contains 'count(*)' or 'count(constant)'
-   *  - SelectList can contain other agg functions, e.g. min, sum, etc
    *  - SelectList can contain constant
-   *
+   *  - only for V1: SelectList can contain other agg functions, e.g. min, sum, etc
    * e.g. 'SELECT count(*) FROM iceberg_tbl' would be rewritten as 'SELECT constant'.
    */
   public void optimizePlainCountStarQuery() throws AnalysisException {
@@ -1459,28 +1459,45 @@ public class SelectStmt extends QueryStmt {
     }
     if (!(table instanceof FeIcebergTable)) return;
 
+    analyzer_.checkStmtExprLimit();
+    Table iceTable = ((FeIcebergTable) table).getIcebergApiTable();
+    if (Utils.hasDeleteFiles(iceTable, tableRef.getTimeTravelSpec())) {
+      optimizePlainCountStarQueryV2(tableRef, (FeIcebergTable)table);
+    } else {
+      optimizePlainCountStarQueryV1(tableRef, iceTable);
+    }
+  }
+
+  private void optimizePlainCountStarQueryV2(TableRef tableRef, FeIcebergTable table)
+      throws AnalysisException {
+    for (SelectListItem selectItem : getSelectList().getItems()) {
+      Expr expr = selectItem.getExpr();
+      if (expr == null) return;
+      if (expr.isConstant()) continue;
+      if (!FunctionCallExpr.isCountStarFunctionCallExpr(expr)) return;
+    }
+    long num = Utils.getRecordCountV2(table, tableRef.getTimeTravelSpec());
+    if (num > 0) {
+      analyzer_.getQueryCtx().setOptimize_count_star_for_iceberg_v2(true);
+      analyzer_.setTotalRecordsNumV2(num);
+    }
+  }
+
+  private void optimizePlainCountStarQueryV1(TableRef tableRef, Table iceTable) {
     boolean hasCountStarFunc = false;
     boolean hasAggFunc = false;
-    analyzer_.checkStmtExprLimit();
-    for (SelectListItem selectItem : this.getSelectList().getItems()) {
+    for (SelectListItem selectItem : getSelectList().getItems()) {
       Expr expr = selectItem.getExpr();
-      if (expr == null) continue;
+      if (expr == null) return;
       if (expr.isConstant()) continue;
-      if (FunctionCallExpr.isCountStarFunctionCallExpr(expr)) {
-        hasCountStarFunc = true;
-      } else if (expr.isAggregate()) {
-        hasAggFunc = true;
-      } else {
-        return;
-      }
+      if (FunctionCallExpr.isCountStarFunctionCallExpr(expr)) { hasCountStarFunc = true; }
+      else if (expr.isAggregate()) { hasAggFunc = true; }
+      else return;
     }
     if (!hasCountStarFunc) return;
-
-    long num = Utils.getRecordCount(
-        ((FeIcebergTable) table).getIcebergApiTable(), tableRef.getTimeTravelSpec());
+    long num = Utils.getRecordCountV1(iceTable, tableRef.getTimeTravelSpec());
     if (num <= 0) return;
-    analyzer_.setTotalRecordsNum(num);
-
+    analyzer_.setTotalRecordsNumV1(num);
     if (hasAggFunc) return;
     // When all select items are 'count(*)' or constant, 'select count(*) from ice_tbl;'
     // would need to be rewritten as 'select const;'
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
index b3a66a0d8..fbda3fd06 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
@@ -63,6 +63,7 @@ import org.apache.impala.analysis.TimeTravelSpec.Kind;
 import org.apache.impala.catalog.HdfsPartition.FileBlock;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.catalog.iceberg.GroupedContentFiles;
+import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.PrintUtils;
@@ -862,22 +863,13 @@ public interface FeIcebergTable extends FeFsTable {
      * the record count cannot be retrieved from the table summary.
      * If 'travelSpec' is null then the current snapshot is being used.
      */
-    public static long getRecordCount(Table icebergTable,
-        TimeTravelSpec travelSpec) {
+    public static long getRecordCountV1(Table icebergTable, TimeTravelSpec travelSpec) {
       Map<String, String> summary = getSnapshotSummary(icebergTable, travelSpec);
       if (summary == null) return -1;
 
       String totalRecordsStr = summary.get(SnapshotSummary.TOTAL_RECORDS_PROP);
       if (Strings.isNullOrEmpty(totalRecordsStr)) return -1;
-
       try {
-        // We cannot tell the record count from the summary if there are deleted rows.
-        String totalDeleteFilesStr = summary.get(SnapshotSummary.TOTAL_DELETE_FILES_PROP);
-        if (!Strings.isNullOrEmpty(totalDeleteFilesStr)) {
-          long totalDeleteFiles = Long.parseLong(totalDeleteFilesStr);
-          if (totalDeleteFiles > 0) return -1;
-        }
-
         return Long.parseLong(totalRecordsStr);
       } catch (NumberFormatException ex) {
         LOG.warn("Failed to get {} from iceberg table summary. Table name: {}, " +
@@ -885,10 +877,45 @@ public interface FeIcebergTable extends FeFsTable {
             SnapshotSummary.TOTAL_RECORDS_PROP, icebergTable.name(),
             icebergTable.location(), totalRecordsStr, ex);
       }
-
       return -1;
     }
 
+    /**
+     * Return the record count calculated from all DataFiles without deletes.
+     */
+    public static long getRecordCountV2(FeIcebergTable table, TimeTravelSpec travelSpec)
+        throws AnalysisException {
+      if (travelSpec == null) {
+        return table.getContentFileStore()
+            .getDataFilesWithoutDeletes().stream()
+            .mapToLong(file -> file.getFbFileMetadata().icebergMetadata().recordCount())
+            .sum();
+      }
+      try {
+        return IcebergUtil.getIcebergFiles(table, Lists.newArrayList(), travelSpec)
+            .dataFilesWithoutDeletes.stream()
+            .mapToLong(ContentFile::recordCount)
+            .sum();
+      } catch (TableLoadingException e) {
+        throw new AnalysisException("Failed to get record count of Iceberg V2 table: "
+            + table.getFullName() ,e);
+      }
+    }
+
+    /**
+     * Return true if the Iceberg table has DeleteFiles.
+     */
+    public static boolean hasDeleteFiles(Table icebergTable, TimeTravelSpec travelSpec) {
+      Map<String, String> summary = getSnapshotSummary(icebergTable, travelSpec);
+      if (summary == null) return false;
+      String totalDeleteFilesStr = summary.get(SnapshotSummary.TOTAL_DELETE_FILES_PROP);
+      if (!Strings.isNullOrEmpty(totalDeleteFilesStr)) {
+        long totalDeleteFiles = Long.parseLong(totalDeleteFilesStr);
+        return totalDeleteFiles > 0;
+      }
+      return false;
+    }
+
     /**
      * Get the snapshot summary from the Iceberg table.
      */
diff --git a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
index 2df5bc329..dea0ebcc2 100644
--- a/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
+++ b/fe/src/main/java/org/apache/impala/planner/IcebergScanPlanner.java
@@ -176,11 +176,16 @@ public class IcebergScanPlanner {
       return ret;
     }
     PlanNode joinNode = createPositionJoinNode();
-    if (dataFilesWithoutDeletes_.isEmpty()) {
-      // All data files has corresponding delete files, so we just return an ANTI JOIN
-      // between all data files and all delete files.
-      return joinNode;
-    }
+
+    // If the count star query can be optimized for an Iceberg V2 table, the number of
+    // rows in all DataFiles without corresponding DeleteFiles can be calculated from
+    // Iceberg metadata files and is added using an ArithmeticExpr.
+    if (ctx_.getQueryCtx().isOptimize_count_star_for_iceberg_v2()) return joinNode;
+
+    // All data files have corresponding delete files, so we just return an ANTI JOIN
+    // between all data files and all delete files.
+    if (dataFilesWithoutDeletes_.isEmpty()) return joinNode;
+
     // If there are data files without corresponding delete files to be applied, we
     // can just create a SCAN node for these and do a UNION ALL with the ANTI JOIN.
     IcebergScanNode dataScanNode = new IcebergScanNode(
diff --git a/fe/src/main/java/org/apache/impala/rewrite/CountStarToConstRule.java b/fe/src/main/java/org/apache/impala/rewrite/CountStarToConstRule.java
index 6709ec28d..dc1fa0ef9 100644
--- a/fe/src/main/java/org/apache/impala/rewrite/CountStarToConstRule.java
+++ b/fe/src/main/java/org/apache/impala/rewrite/CountStarToConstRule.java
@@ -18,25 +18,74 @@
 package org.apache.impala.rewrite;
 
 import org.apache.impala.analysis.Analyzer;
+import org.apache.impala.analysis.ArithmeticExpr;
+import org.apache.impala.analysis.ArithmeticExpr.Operator;
 import org.apache.impala.analysis.Expr;
 import org.apache.impala.analysis.FunctionCallExpr;
 import org.apache.impala.analysis.LiteralExpr;
+import org.apache.impala.analysis.NumericLiteral;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
 
 /**
  * Rewrite rule to replace plain count star function call expr to const expr.
+ * Examples:
+ * 1. Iceberg V1 Table
+ * 1.1 "SELECT COUNT(*) FROM ice_tbl" -> "SELECT `CONST`"
+ * 1.2 "SELECT COUNT(*),MIN(col_a),MAX(col_b) FROM ice_tbl" -> "SELECT `CONST`,MIN(col_a),
+ * MAX(col_b) FROM ice_tbl"
+ *
+ * 2. Iceberg V2 Table
+ *
+ *     AGGREGATE
+ *     COUNT(*)
+ *         |
+ *     UNION ALL
+ *    /        \
+ *   /          \
+ *  /            \
+ * SCAN all  ANTI JOIN
+ * datafiles  /      \
+ * without   /        \
+ * deletes  SCAN      SCAN
+ *          datafiles deletes
+ *
+ *          ||
+ *        rewrite
+ *          ||
+ *          \/
+ *
+ *    ArithmeticExpr(ADD)
+ *    /             \
+ *   /               \
+ *  /                 \
+ * record_count  AGGREGATE
+ * of all        COUNT(*)
+ * datafiles         |
+ * without       ANTI JOIN
+ * deletes      /         \
+ *             /           \
+ *             SCAN        SCAN
+ *             datafiles   deletes
  */
 public enum CountStarToConstRule implements ExprRewriteRule {
 
-    INSTANCE,
-    ;
+  INSTANCE,
+  ;
 
-    @Override
-    public Expr apply(Expr expr, Analyzer analyzer) throws AnalysisException {
-        if (!FunctionCallExpr.isCountStarFunctionCallExpr(expr)) return expr;
-        if (!analyzer.canRewriteCountStarToConst()) return expr;
-        return LiteralExpr.createFromUnescapedStr(String.valueOf(
-            analyzer.getTotalRecordsNum()), Type.BIGINT);
+  @Override
+  public Expr apply(Expr expr, Analyzer analyzer) throws AnalysisException {
+    if (expr.isRewritten()) return expr;
+    if (!FunctionCallExpr.isCountStarFunctionCallExpr(expr)) return expr;
+    if (analyzer.canRewriteCountStarForV1()) {
+      return LiteralExpr.createFromUnescapedStr(String.valueOf(
+          analyzer.getTotalRecordsNumV1()), Type.BIGINT);
+    } else if (analyzer.canRewriteCountStartForV2()) {
+      expr.setRewritten(true);
+      return new ArithmeticExpr(Operator.ADD, expr, NumericLiteral.create(
+          analyzer.getTotalRecordsNumV2()));
+    } else {
+      return expr;
     }
+  }
 }
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/iceberg-v2-tables.test b/testdata/workloads/functional-planner/queries/PlannerTest/iceberg-v2-tables.test
index 7186114ef..74315a408 100644
--- a/testdata/workloads/functional-planner/queries/PlannerTest/iceberg-v2-tables.test
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/iceberg-v2-tables.test
@@ -307,6 +307,110 @@ PLAN-ROOT SINK
    HDFS partitions=1/1 files=2 size=1.22KB
    row-size=36B cardinality=4
 ====
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files for system_version as of 7490459762454857930;
+---- PLAN
+PLAN-ROOT SINK
+|
+00:UNION
+   constant-operands=1
+   row-size=8B cardinality=1
+---- DISTRIBUTEDPLAN
+PLAN-ROOT SINK
+|
+00:UNION
+   constant-operands=1
+   row-size=8B cardinality=1
+====
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files for system_version as of 752781918366351945;
+---- PLAN
+PLAN-ROOT SINK
+|
+03:AGGREGATE [FINALIZE]
+|  output: count(*)
+|  row-size=8B cardinality=1
+|
+02:DELETE EVENTS HASH JOIN [LEFT ANTI JOIN]
+|  row-size=20B cardinality=3
+|
+|--01:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-POSITION-DELETE-01 functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete]
+|     HDFS partitions=1/1 files=1 size=2.63KB
+|     row-size=267B cardinality=1
+|
+00:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files]
+   HDFS partitions=1/1 files=1 size=625B
+   row-size=20B cardinality=3
+---- DISTRIBUTEDPLAN
+PLAN-ROOT SINK
+|
+06:AGGREGATE [FINALIZE]
+|  output: count:merge(*)
+|  row-size=8B cardinality=1
+|
+05:EXCHANGE [UNPARTITIONED]
+|
+03:AGGREGATE
+|  output: count(*)
+|  row-size=8B cardinality=1
+|
+02:DELETE EVENTS HASH JOIN [LEFT ANTI JOIN, BROADCAST]
+|  row-size=20B cardinality=3
+|
+|--04:EXCHANGE [BROADCAST]
+|  |
+|  01:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-POSITION-DELETE-01 functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete]
+|     HDFS partitions=1/1 files=1 size=2.63KB
+|     row-size=267B cardinality=1
+|
+00:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files]
+   HDFS partitions=1/1 files=1 size=625B
+   row-size=20B cardinality=3
+====
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files;
+---- PLAN
+PLAN-ROOT SINK
+|
+03:AGGREGATE [FINALIZE]
+|  output: count(*)
+|  row-size=8B cardinality=1
+|
+02:DELETE EVENTS HASH JOIN [LEFT ANTI JOIN]
+|  row-size=20B cardinality=6
+|
+|--01:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-POSITION-DELETE-01 functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete]
+|     HDFS partitions=1/1 files=2 size=5.33KB
+|     row-size=267B cardinality=4
+|
+00:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files]
+   HDFS partitions=1/1 files=2 size=1.22KB
+   row-size=20B cardinality=6
+---- DISTRIBUTEDPLAN
+PLAN-ROOT SINK
+|
+07:AGGREGATE [FINALIZE]
+|  output: count:merge(*)
+|  row-size=8B cardinality=1
+|
+06:EXCHANGE [UNPARTITIONED]
+|
+03:AGGREGATE
+|  output: count(*)
+|  row-size=8B cardinality=1
+|
+02:DELETE EVENTS HASH JOIN [LEFT ANTI JOIN, PARTITIONED]
+|  row-size=20B cardinality=6
+|
+|--05:EXCHANGE [HASH(functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete.pos,functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete.file_path)]
+|  |
+|  01:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-POSITION-DELETE-01 functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files-position-delete]
+|     HDFS partitions=1/1 files=2 size=5.33KB
+|     row-size=267B cardinality=4
+|
+04:EXCHANGE [HASH(functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files.file__position,functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files.input__file__name)]
+|
+00:SCAN HDFS [functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files]
+   HDFS partitions=1/1 files=2 size=1.22KB
+   row-size=20B cardinality=6
+====
 SELECT * from iceberg_v2_positional_update_all_rows
 ---- PLAN
 PLAN-ROOT SINK
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-plain-count-star-optimization.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-plain-count-star-optimization.test
index cad21f929..5a3a19656 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-plain-count-star-optimization.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-plain-count-star-optimization.test
@@ -232,4 +232,4 @@ BIGINT
 ---- RUNTIME_PROFILE
 aggregation(SUM, NumRowGroups): 0
 aggregation(SUM, NumFileMetadataRead): 0
-====
+====
\ No newline at end of file
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-plain-count-star-optimization.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-plain-count-star-optimization.test
new file mode 100644
index 000000000..c84ee868e
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-plain-count-star-optimization.test
@@ -0,0 +1,47 @@
+====
+---- QUERY
+select count(*) as c from functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files
+union all
+(select count(*) c from functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files_orc)
+union all
+(select -1 as c)
+union all
+(select count(*) as c from functional_parquet.iceberg_v2_no_deletes)
+union all
+(select count(*) as c from functional_parquet.iceberg_v2_no_deletes_orc) order by c;
+---- RESULTS
+-1
+3
+3
+6
+6
+---- TYPES
+BIGINT
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 4
+aggregation(SUM, NumOrcStripes): 4
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
+select count(*) as c from iceberg_v2_positional_not_all_data_files_have_delete_files for system_version as of 752781918366351945
+union all
+(select count(*) as c from iceberg_v2_positional_not_all_data_files_have_delete_files_orc for system_version as of 5003445199566617082)
+union all
+(select -1 as c)
+union all
+(select count(*) as c from functional_parquet.iceberg_v2_no_deletes)
+union all
+(select count(*) as c from functional_parquet.iceberg_v2_no_deletes_orc) order by c;
+---- RESULTS
+-1
+3
+3
+9
+9
+---- TYPES
+BIGINT
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumOrcStripes): 2
+aggregation(SUM, NumFileMetadataRead): 0
+====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
index e7566df64..a2af39918 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
@@ -74,11 +74,24 @@ SHOW TABLE STATS iceberg_v2_positional_delete_all_rows_orc
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
-SELECT count(*) from iceberg_v2_positional_delete_all_rows_orc;
+SELECT count(*) from iceberg_v2_positional_delete_all_rows_orc for system_version as of 4807054508647143162
+---- RESULTS
+3
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_delete_all_rows_orc
 ---- RESULTS
 0
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 2
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
@@ -113,11 +126,46 @@ SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SHOW FILES IN iceberg_v2_positional_not_all_data_files_have_delete_files_orc;
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-data.*.orc','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-data.*.orc','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-data.*.orc','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-data.*.orc','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-delete.*.orc','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc/data/00000-0-delete.*.orc','.*','','$ERASURECODE_POLICY'
+---- TYPES
+STRING, STRING, STRING, STRING
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files_orc for system_version as of 8476486151350891395
+---- RESULTS
+7
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files_orc for system_version as of 5003445199566617082
+---- RESULTS
+9
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 2
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files_orc
 ---- RESULTS
 6
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 4
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_partitioned_position_deletes_orc
@@ -152,11 +200,24 @@ SHOW TABLE STATS iceberg_v2_partitioned_position_deletes_orc
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SELECT count(*) from iceberg_v2_partitioned_position_deletes_orc for system_version as of 5416468273053855108
+---- RESULTS
+20
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_partitioned_position_deletes_orc
 ---- RESULTS
 10
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumOrcStripes): 6
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 SELECT count(*) from iceberg_v2_no_deletes_orc where i = 2;
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
index 8d491cdb4..e9c7b985f 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
@@ -74,11 +74,24 @@ SHOW TABLE STATS iceberg_v2_delete_positional
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SELECT count(*) from iceberg_v2_delete_positional for system_version as of 6816997371555012807
+---- RESULTS
+3
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_delete_positional;
 ---- RESULTS
 2
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_positional_delete_all_rows
@@ -113,11 +126,24 @@ SHOW TABLE STATS iceberg_v2_positional_delete_all_rows
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
-SELECT count(*) from iceberg_v2_positional_delete_all_rows;
+SELECT count(*) from iceberg_v2_positional_delete_all_rows for system_version as of 8593920101374128463
+---- RESULTS
+3
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_delete_all_rows
 ---- RESULTS
 0
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_positional_not_all_data_files_have_delete_files
@@ -152,11 +178,46 @@ SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SHOW FILES IN iceberg_v2_positional_not_all_data_files_have_delete_files;
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-delete.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files/data/00000-0-delete.*.parquet','.*','','$ERASURECODE_POLICY'
+---- TYPES
+STRING, STRING, STRING, STRING
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files for system_version as of 7490459762454857930
+---- RESULTS
+10
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files for system_version as of 752781918366351945
+---- RESULTS
+9
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files
 ---- RESULTS
 6
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 4
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_positional_update_all_rows
@@ -191,11 +252,33 @@ SHOW TABLE STATS iceberg_v2_positional_update_all_rows
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SHOW FILES IN iceberg_v2_positional_update_all_rows;
+---- RESULTS
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_update_all_rows/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_update_all_rows/data/00000-0-data.*.parquet','.*','','$ERASURECODE_POLICY'
+row_regex:'$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_update_all_rows/data/00000-0-delete.*.parquet','.*','','$ERASURECODE_POLICY'
+---- TYPES
+STRING, STRING, STRING, STRING
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_update_all_rows for system_version as of 3877007445826010687
+---- RESULTS
+3
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_update_all_rows
 ---- RESULTS
 3
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 2
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 COMPUTE STATS iceberg_v2_partitioned_position_deletes
@@ -230,11 +313,24 @@ SHOW TABLE STATS iceberg_v2_partitioned_position_deletes
 BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING,STRING
 ====
 ---- QUERY
+SELECT count(*) from iceberg_v2_partitioned_position_deletes for system_version as of 2057976186205897384
+---- RESULTS
+20
+---- TYPES
+bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 0
+aggregation(SUM, NumFileMetadataRead): 0
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_partitioned_position_deletes
 ---- RESULTS
 10
 ---- TYPES
 bigint
+---- RUNTIME_PROFILE
+aggregation(SUM, NumRowGroups): 6
+aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
 SELECT count(*) from iceberg_v2_no_deletes where i = 2;
diff --git a/tests/query_test/test_iceberg.py b/tests/query_test/test_iceberg.py
index 0ed9a6d8f..6f1938c0d 100644
--- a/tests/query_test/test_iceberg.py
+++ b/tests/query_test/test_iceberg.py
@@ -985,6 +985,12 @@ class TestIcebergV2Table(IcebergTestSuite):
   # The test uses pre-written Iceberg tables where the position delete files refer to
   # the data files via full URI, i.e. they start with 'hdfs://localhost:2050/...'. In the
   # dockerised environment the namenode is accessible on a different hostname/port.
+  @SkipIfDockerizedCluster.internal_hostname
+  @SkipIf.hardcoded_uris
+  def test_plain_count_star_optimization(self, vector):
+      self.run_test_case('QueryTest/iceberg-v2-plain-count-star-optimization',
+                         vector)
+
   @SkipIfDockerizedCluster.internal_hostname
   @SkipIf.hardcoded_uris
   def test_read_position_deletes(self, vector):


[impala] 02/04: IMPALA-4052: CREATE TABLE LIKE for Kudu tables

Posted by st...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 89cc20717eb0f054db59b4de06f7f01279eeb252
Author: gaoxq <ga...@gmail.com>
AuthorDate: Wed Jul 13 20:02:29 2022 +0800

    IMPALA-4052: CREATE TABLE LIKE for Kudu tables
    
    This commit implements cloning between Kudu tables, including cloning the
    schema and hash partitions. One limitation remains: cloning Kudu tables
    with range partitions is not supported; that work is tracked by
    IMPALA-11912.
    
    Cloning Kudu tables from other types of tables is not implemented,
    because the table creation statements are different.
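
    As a rough illustration, the hypothetical Java sketch below (method and
    parameter names are illustrative, not Impala's actual API) distills the
    rejection rules implemented by this change in the analyzer: only
    Kudu-to-Kudu clones are allowed, SORT BY is rejected for Kudu, and
    range-partitioned sources are not yet supported.

        public class KuduCloneValidationSketch {
          // Mirrors the restrictions in this change: Kudu tables can only be
          // cloned into Kudu tables, SORT BY is not supported for Kudu, and
          // range-partitioned sources are rejected (tracked by IMPALA-11912).
          static void validateKuduClone(boolean srcIsKudu, boolean targetIsKudu,
              boolean hasSortBy, boolean srcHasRangePartitions) {
            if (srcIsKudu != targetIsKudu) {
              throw new IllegalArgumentException("CREATE TABLE LIKE is not supported "
                  + "between Kudu tables and non-Kudu tables");
            }
            if (srcIsKudu && hasSortBy) {
              throw new IllegalArgumentException(
                  "SORT BY is not supported for Kudu tables");
            }
            if (srcIsKudu && srcHasRangePartitions) {
              throw new IllegalArgumentException("CREATE TABLE LIKE is not supported "
                  + "for Kudu tables having range partitions");
            }
          }

          public static void main(String[] args) {
            // A Kudu-to-Kudu clone with hash partitions only is accepted.
            validateKuduClone(true, true, false, false);
            System.out.println("Kudu-to-Kudu clone without range partitions is accepted");
          }
        }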
    
    Testing:
     - e2e tests
     - AnalyzeDDLTest tests
    
    Change-Id: Ia3d276a6465301dbcfed17bb713aca06367d9a42
    Reviewed-on: http://gerrit.cloudera.org:8080/18729
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../impala/analysis/CreateTableLikeStmt.java       |  38 +++++--
 .../apache/impala/service/CatalogOpExecutor.java   |  54 +++++++++-
 .../main/java/org/apache/impala/util/KuduUtil.java |   5 +
 .../org/apache/impala/analysis/AnalyzeDDLTest.java |  22 +++-
 .../functional/functional_schema_template.sql      |   2 +-
 .../QueryTest/kudu_create_table_like_table.test    | 115 +++++++++++++++++++++
 tests/custom_cluster/test_kudu.py                  |   6 ++
 tests/query_test/test_kudu.py                      |   7 ++
 8 files changed, 232 insertions(+), 17 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
index 727e3c3d0..0b66bd1a6 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateTableLikeStmt.java
@@ -166,21 +166,13 @@ public class CreateTableLikeStmt extends StatementBase {
   public void analyze(Analyzer analyzer) throws AnalysisException {
     Preconditions.checkState(tableName_ != null && !tableName_.isEmpty());
     Preconditions.checkState(srcTableName_ != null && !srcTableName_.isEmpty());
-    // We currently don't support creating a Kudu table using a CREATE TABLE LIKE
-    // statement (see IMPALA-4052).
-    if (fileFormat_ == THdfsFileFormat.KUDU) {
-      throw new AnalysisException("CREATE TABLE LIKE is not supported for Kudu tables");
-    }
 
     // Make sure the source table exists and the user has permission to access it.
     FeTable srcTable = analyzer.getTable(srcTableName_, Privilege.VIEW_METADATA);
 
     analyzer.ensureTableNotBucketed(srcTable);
 
-    if (KuduTable.isKuduTable(srcTable.getMetaStoreTable())) {
-      throw new AnalysisException("Cloning a Kudu table using CREATE TABLE LIKE is " +
-          "not supported.");
-    }
+    validateCreateKuduTableParams(srcTable);
 
     // Only clone between Iceberg tables because the Data Types of Iceberg and Impala
     // do not correspond one by one, the transformation logic is in
@@ -215,4 +207,32 @@ public class CreateTableLikeStmt extends StatementBase {
       TableDef.analyzeSortColumns(sortColumns_, srcTable, sortingOrder_);
     }
   }
+
+  private void validateCreateKuduTableParams(FeTable srcTable) throws AnalysisException {
+    // Only clone between Kudu tables because the table creation statements are different.
+    if ((fileFormat_ == THdfsFileFormat.KUDU
+            && !KuduTable.isKuduTable(srcTable.getMetaStoreTable()))
+        || (fileFormat_ != null && fileFormat_ != THdfsFileFormat.KUDU
+               && KuduTable.isKuduTable(srcTable.getMetaStoreTable()))) {
+      throw new AnalysisException(String.format(
+          "%s cannot be cloned into a %s table: CREATE TABLE LIKE is not supported "
+              + "between Kudu tables and non-Kudu tables.",
+          srcTable.getFullName(), fileFormat_.toString()));
+    }
+    if (sortColumns_ != null && KuduTable.isKuduTable(srcTable.getMetaStoreTable())) {
+      throw new AnalysisException(srcTable.getFullName()
+          + " cannot be cloned because SORT BY is not supported for Kudu tables.");
+    }
+    if (srcTable instanceof KuduTable) {
+      KuduTable kuduTable = (KuduTable) srcTable;
+      for (KuduPartitionParam kuduPartitionParam : kuduTable.getPartitionBy()) {
+        // TODO: IMPALA-11912: Add support for cloning a Kudu table with range partitions
+        if (kuduPartitionParam.getType() == KuduPartitionParam.Type.RANGE) {
+          throw new AnalysisException(
+              "CREATE TABLE LIKE is not supported for Kudu tables having range "
+              + "partitions.");
+        }
+      }
+    }
+  }
 }
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 21c2ac28f..a99f8a00e 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -86,6 +86,7 @@ import org.apache.iceberg.catalog.TableIdentifier;
 import org.apache.iceberg.mr.Catalogs;
 import org.apache.impala.analysis.AlterTableSortByStmt;
 import org.apache.impala.analysis.FunctionName;
+import org.apache.impala.analysis.KuduPartitionParam;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.TableName;
 import org.apache.impala.authorization.AuthorizationConfig;
@@ -115,6 +116,7 @@ import org.apache.impala.catalog.HdfsPartition;
 import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.HiveStorageDescriptorFactory;
 import org.apache.impala.catalog.IncompleteTable;
+import org.apache.impala.catalog.KuduColumn;
 import org.apache.impala.catalog.KuduTable;
 import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.catalog.PartitionNotFoundException;
@@ -212,6 +214,7 @@ import org.apache.impala.thrift.THdfsFileFormat;
 import org.apache.impala.thrift.TIcebergCatalog;
 import org.apache.impala.thrift.TImpalaTableType;
 import org.apache.impala.thrift.TIcebergPartitionSpec;
+import org.apache.impala.thrift.TKuduPartitionParam;
 import org.apache.impala.thrift.TOwnerType;
 import org.apache.impala.thrift.TPartitionDef;
 import org.apache.impala.thrift.TPartitionKeyValue;
@@ -3772,7 +3775,6 @@ public class CatalogOpExecutor {
   private void createTableLike(TCreateTableLikeParams params, TDdlExecResponse response,
       boolean syncDdl, boolean wantMinimalResult) throws ImpalaException {
     Preconditions.checkNotNull(params);
-
     THdfsFileFormat fileFormat =
         params.isSetFile_format() ? params.getFile_format() : null;
     String comment = params.isSetComment() ? params.getComment() : null;
@@ -3819,8 +3821,6 @@ public class CatalogOpExecutor {
         "Load source for CREATE TABLE LIKE");
     org.apache.hadoop.hive.metastore.api.Table tbl =
         srcTable.getMetaStoreTable().deepCopy();
-    Preconditions.checkState(!KuduTable.isKuduTable(tbl),
-        "CREATE TABLE LIKE is not supported for Kudu tables.");
     tbl.setDbName(tblName.getDb());
     tbl.setTableName(tblName.getTbl());
     tbl.setOwner(params.getOwner());
@@ -3908,6 +3908,10 @@ public class CatalogOpExecutor {
           .toThrift();
       createIcebergTable(tbl, wantMinimalResult, response, params.if_not_exists, columns,
           partitionSpec, tableProperties, params.getComment());
+    } else if (srcTable instanceof KuduTable && KuduTable.isKuduTable(tbl)) {
+      TCreateTableParams createTableParams =
+          extractKuduCreateTableParams(params, tblName, (KuduTable) srcTable, tbl);
+      createKuduTable(tbl, createTableParams, wantMinimalResult, response);
     } else {
       MetastoreShim.setTableLocation(catalog_.getDb(tbl.getDbName()), tbl);
       createTable(tbl, params.if_not_exists, null, params.server_name, null, null,
@@ -3915,6 +3919,50 @@ public class CatalogOpExecutor {
     }
   }
 
+  /**
+   * Build TCreateTableParams from the source Kudu table.
+   */
+  private TCreateTableParams extractKuduCreateTableParams(TCreateTableLikeParams params,
+      TableName tblName, KuduTable kuduTable,
+      org.apache.hadoop.hive.metastore.api.Table tbl) throws ImpalaRuntimeException {
+    TCreateTableParams createTableParams = new TCreateTableParams();
+    createTableParams.if_not_exists = params.if_not_exists;
+    createTableParams.setComment(params.getComment());
+    List<TColumn> columns = new ArrayList<>();
+    for (Column col : kuduTable.getColumns()) {
+      // Omit cloning auto-incrementing column of Kudu table since the column will be
+      // created by Kudu engine.
+      if (((KuduColumn) col).isAutoIncrementing()) continue;
+      columns.add(col.toThrift());
+    }
+    createTableParams.setColumns(columns);
+    // Omit auto-incrementing column as primary key.
+    List<String> primaryColumnNames =
+        new ArrayList<>(kuduTable.getPrimaryKeyColumnNames());
+    if (kuduTable.hasAutoIncrementingColumn()) {
+      primaryColumnNames.remove(KuduUtil.getAutoIncrementingColumnName());
+    }
+    createTableParams.setPrimary_key_column_names(primaryColumnNames);
+
+    List<TKuduPartitionParam> partitionParams = new ArrayList<>();
+    for (KuduPartitionParam kuduPartitionParam : kuduTable.getPartitionBy()) {
+      partitionParams.add(kuduPartitionParam.toThrift());
+    }
+    createTableParams.setPartition_by(partitionParams);
+
+    Map<String, String> tableProperties = tbl.getParameters();
+    tableProperties.remove(KuduTable.KEY_TABLE_NAME);
+    tableProperties.remove(KuduTable.KEY_TABLE_ID);
+
+    String kuduMasters = tbl.getParameters().get(KuduTable.KEY_MASTER_HOSTS);
+    boolean isKuduHmsIntegrationEnabled = KuduTable.isHMSIntegrationEnabled(kuduMasters);
+    tableProperties.put(KuduTable.KEY_TABLE_NAME,
+        KuduUtil.getDefaultKuduTableName(
+            tblName.getDb(), tblName.getTbl(), isKuduHmsIntegrationEnabled));
+    tbl.setParameters(tableProperties);
+    return createTableParams;
+  }
+
   private static void setDefaultTableCapabilities(
       org.apache.hadoop.hive.metastore.api.Table tbl) {
     if (MetastoreShim.getMajorVersion() > 2) {
diff --git a/fe/src/main/java/org/apache/impala/util/KuduUtil.java b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
index c8da69055..ece4f3726 100644
--- a/fe/src/main/java/org/apache/impala/util/KuduUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/KuduUtil.java
@@ -493,4 +493,9 @@ public class KuduUtil {
     sb.append("PRIMARY KEY");
     return sb.toString();
   }
+
+  // Get auto-incrementing column name of Kudu table
+  public static String getAutoIncrementingColumnName() {
+    return Schema.getAutoIncrementingColumnName();
+  }
 }
diff --git a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
index 30f541c03..5986fb1c8 100755
--- a/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AnalyzeDDLTest.java
@@ -2506,11 +2506,25 @@ public class AnalyzeDDLTest extends FrontendTestBase {
     AnalysisError("create table functional.baz like functional.alltypes location '  '",
         "URI path cannot be empty.");
 
-    // CREATE TABLE LIKE is not currently supported for Kudu tables (see IMPALA-4052)
+    // CREATE TABLE LIKE only implements cloning between Kudu tables (see IMPALA-4052)
     AnalysisError("create table kudu_tbl like functional.alltypestiny stored as kudu",
-        "CREATE TABLE LIKE is not supported for Kudu tables");
-    AnalysisError("create table tbl like functional_kudu.dimtbl", "Cloning a Kudu " +
-        "table using CREATE TABLE LIKE is not supported.");
+        "functional.alltypestiny cannot be cloned into a KUDU table: " +
+        "CREATE TABLE LIKE is not supported between Kudu tables and non-Kudu tables.");
+    AnalysisError(
+        "create table kudu_to_parquet like functional_kudu.alltypes stored as parquet",
+        "functional_kudu.alltypes cannot be cloned into a PARQUET table: CREATE "
+            + "TABLE LIKE is not supported between Kudu tables and non-Kudu tables.");
+    AnalysisError("create table kudu_decimal_tbl_clone sort by (d1, d2) like "
+            + "functional_kudu.decimal_tbl",
+        "functional_kudu.decimal_tbl cannot be cloned "
+            + "because SORT BY is not supported for Kudu tables.");
+    AnalysisError(
+        "create table alltypestiny_clone sort by (d1, d2) like functional.alltypestiny " +
+        "stored as kudu", "functional.alltypestiny cannot be cloned into a KUDU table: " +
+        "CREATE TABLE LIKE is not supported between Kudu tables and non-Kudu tables.");
+    // Kudu tables with range partitions cannot be cloned
+    AnalysisError("create table kudu_jointbl_clone like functional_kudu.jointbl",
+        "CREATE TABLE LIKE is not supported for Kudu tables having range partitions.");
 
     // Test sort columns.
     AnalyzesOk("create table tbl sort by (int_col,id) like functional.alltypes");
diff --git a/testdata/datasets/functional/functional_schema_template.sql b/testdata/datasets/functional/functional_schema_template.sql
index 8ac045275..27b7f3cee 100644
--- a/testdata/datasets/functional/functional_schema_template.sql
+++ b/testdata/datasets/functional/functional_schema_template.sql
@@ -263,7 +263,7 @@ CREATE TABLE {db_name}{db_suffix}.{table_name} (
   year INT,
   month INT
 )
-PARTITION BY HASH (id) PARTITIONS 3 STORED AS KUDU;
+PARTITION BY HASH (id) PARTITIONS 3 COMMENT 'Tiny table' STORED AS KUDU;
 ---- DEPENDENT_LOAD_KUDU
 INSERT INTO TABLE {db_name}{db_suffix}.{table_name}
 SELECT id, bool_col, tinyint_col, smallint_col, int_col, bigint_col, float_col, double_col, date_string_col, string_col,
diff --git a/testdata/workloads/functional-query/queries/QueryTest/kudu_create_table_like_table.test b/testdata/workloads/functional-query/queries/QueryTest/kudu_create_table_like_table.test
new file mode 100644
index 000000000..96dd13a75
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_create_table_like_table.test
@@ -0,0 +1,115 @@
+====
+---- QUERY
+# CREATE TABLE LIKE on Kudu table
+create table kudu_alltypes_clone like functional_kudu.alltypes
+stored as kudu
+---- RESULTS
+'Table has been created.'
+====
+---- QUERY
+# Make sure no data exists for this table
+select count(*) from kudu_alltypes_clone
+---- RESULTS
+0
+---- TYPES
+BIGINT
+====
+---- QUERY
+describe formatted kudu_alltypes_clone
+---- RESULTS: VERIFY_IS_SUBSET
+'# col_name            ','data_type           ','comment             '
+'','NULL','NULL'
+'id','int','NULL'
+'bool_col','boolean','NULL'
+'tinyint_col','tinyint','NULL'
+'smallint_col','smallint','NULL'
+'int_col','int','NULL'
+'bigint_col','bigint','NULL'
+'float_col','float','NULL'
+'double_col','double','NULL'
+'date_string_col','string','NULL'
+'string_col','string','NULL'
+'timestamp_col','timestamp','NULL'
+'year','int','NULL'
+'month','int','NULL'
+'','NULL','NULL'
+'# Detailed Table Information','NULL','NULL'
+'OwnerType:          ','USER                ','NULL'
+'LastAccessTime:     ','UNKNOWN             ','NULL'
+'Retention:          ','0                   ','NULL'
+'Table Parameters:','NULL','NULL'
+'','storage_handler     ','org.apache.hadoop.hive.kudu.KuduStorageHandler'
+'','NULL','NULL'
+'# Storage Information','NULL','NULL'
+'Compressed:         ','No                  ','NULL'
+'Num Buckets:        ','0                   ','NULL'
+'Bucket Columns:     ','[]                  ','NULL'
+'Sort Columns:       ','[]                  ','NULL'
+'','NULL','NULL'
+'# Constraints','NULL','NULL'
+---- TYPES
+string, string, string
+====
+---- QUERY
+# Should be able to insert into this table
+insert into kudu_alltypes_clone
+select id, bool_col, tinyint_col, smallint_col, int_col, bigint_col,
+float_col, double_col, date_string_col, string_col, timestamp_col,
+year, month
+from functional.alltypes where year=2009 and month=4
+---- RESULTS
+: 300
+====
+---- QUERY
+# Make sure we can read the new data.
+select count(*) from kudu_alltypes_clone
+---- RESULTS
+300
+---- TYPES
+BIGINT
+====
+---- QUERY
+# CREATE TABLE LIKE with a table comment.
+create table kudu_alltypestiny_clone like functional_kudu.alltypestiny comment 'Tiny clone table'
+---- RESULTS
+'Table has been created.'
+====
+---- QUERY
+describe formatted kudu_alltypestiny_clone
+---- RESULTS: VERIFY_IS_SUBSET
+'# col_name            ','data_type           ','comment             '
+'','comment             ','Tiny clone table    '
+---- TYPES
+string, string, string
+====
+---- QUERY
+# No error is thrown when IF NOT EXISTS is specified and the table already exists.
+create table if not exists kudu_alltypes_clone like functional_kudu.alltypes
+---- RESULTS
+'Table already exists.'
+====
+---- QUERY
+# Create a Kudu table with a non unique primary key
+create table non_unique_key_create_tbl1 (id int non unique primary key, name string)
+partition by hash (id) partitions 3
+stored as kudu
+---- RESULTS
+'Table has been created.'
+====
+---- QUERY
+# CREATE TABLE LIKE on a Kudu table with a non unique primary key
+create table non_unique_key_create_tbl1_clone like non_unique_key_create_tbl1
+---- RESULTS
+'Table has been created.'
+====
+---- QUERY
+describe non_unique_key_create_tbl1_clone
+---- LABELS
+NAME,TYPE,COMMENT,PRIMARY_KEY,KEY_UNIQUE,NULLABLE,DEFAULT_VALUE,ENCODING,COMPRESSION,BLOCK_SIZE
+---- RESULTS
+'id','int','','true','false','false','','AUTO_ENCODING','DEFAULT_COMPRESSION','0'
+'auto_incrementing_id','bigint','','true','false','false','','AUTO_ENCODING','DEFAULT_COMPRESSION','0'
+'name','string','','false','','true','','AUTO_ENCODING','DEFAULT_COMPRESSION','0'
+---- TYPES
+STRING,STRING,STRING,STRING,STRING,STRING,STRING,STRING,STRING,STRING
+====
diff --git a/tests/custom_cluster/test_kudu.py b/tests/custom_cluster/test_kudu.py
index d05517061..74155da95 100644
--- a/tests/custom_cluster/test_kudu.py
+++ b/tests/custom_cluster/test_kudu.py
@@ -336,6 +336,12 @@ class TestKuduHMSIntegration(CustomKuduTest):
   def test_kudu_alter_table(self, vector, unique_database):
     self.run_test_case('QueryTest/kudu_hms_alter', vector, use_db=unique_database)
 
+  @SkipIfKudu.no_hybrid_clock
+  def test_create_kudu_table_like(self, vector, unique_database):
+    self.run_test_case(
+      'QueryTest/kudu_create_table_like_table',
+      vector,
+      use_db=unique_database)
 
 class TestKuduTransactionBase(CustomClusterTestSuite):
   """
diff --git a/tests/query_test/test_kudu.py b/tests/query_test/test_kudu.py
index f9f728900..587431dd0 100644
--- a/tests/query_test/test_kudu.py
+++ b/tests/query_test/test_kudu.py
@@ -85,6 +85,13 @@ class TestKuduBasicDML(KuduTestSuite):
   def test_kudu_delete(self, vector, unique_database):
     self.run_test_case('QueryTest/kudu_delete', vector, use_db=unique_database)
 
+  @SkipIfKudu.no_hybrid_clock
+  def test_kudu_create_table_like_table(self, vector, unique_database):
+    self.run_test_case(
+      'QueryTest/kudu_create_table_like_table',
+      vector,
+      use_db=unique_database)
+
 # TODO(IMPALA-8614): parameterize some tests to run with HMS integration enabled.
 class TestKuduOperations(KuduTestSuite):
   """