Posted to commits@impala.apache.org by jo...@apache.org on 2022/09/22 22:30:09 UTC

[impala] branch master updated: IMPALA-11583: Use Iceberg API to update stats

This is an automated email from the ASF dual-hosted git repository.

joemcdonnell pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git


The following commit(s) were added to refs/heads/master by this push:
     new 3f382b7eb IMPALA-11583: Use Iceberg API to update stats
3f382b7eb is described below

commit 3f382b7ebbd66a5a02270e14ff493bd9607c0b94
Author: Zoltan Borok-Nagy <bo...@cloudera.com>
AuthorDate: Fri Sep 16 15:42:03 2022 +0200

    IMPALA-11583: Use Iceberg API to update stats
    
    Before this patch we used the HMS alter_table() API to update an
    Iceberg table's statistics. 'alter_table()' calls are unsafe for
    Iceberg tables because they overwrite the whole HMS table, including
    the table property 'metadata_location', which must always point to
    the latest snapshot. Hence concurrent modifications to the same table
    could be reverted by COMPUTE STATS.
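
    To make the failure mode concrete, here is a hedged sketch of the
    pre-patch pattern (class, method and variable names below are
    illustrative, not the actual Impala code):

        // Unsafe: the whole HMS Table object read in step 1 is written back
        // in step 3, so a 'metadata_location' advanced by a concurrent Hive
        // commit in step 2 is silently reverted.
        import org.apache.hadoop.hive.metastore.IMetaStoreClient;
        import org.apache.hadoop.hive.metastore.api.Table;

        class UnsafeAlterSketch {
          static void setNumRowsUnsafely(IMetaStoreClient client, String db,
              String tbl) throws Exception {
            Table msTbl = client.getTable(db, tbl);   // 1. read table object
            // 2. a concurrent Hive INSERT may advance 'metadata_location' now
            msTbl.getParameters().put("numRows", "42");
            client.alter_table(db, tbl, msTbl);       // 3. stale object wins
          }
        }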
    
    In this patch we use the Iceberg API to update Iceberg tables.
    Also, table-level stats (e.g. numRows, totalSize, totalFiles) are not
    set, as Iceberg keeps them up to date.
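
    For illustration, here is a hedged sketch of the new approach (not the
    exact patch code; the table handle and property value passed in are
    assumptions): property updates go through an Iceberg transaction, so
    Iceberg itself advances 'metadata_location' with an atomic commit:

        import org.apache.iceberg.Table;
        import org.apache.iceberg.Transaction;

        class IcebergStatsPropertySketch {
          static void setComputeStatsTime(Table icebergTable, long nowMillis) {
            Transaction txn = icebergTable.newTransaction();
            txn.updateProperties()
                .set("impala.lastComputeStatsTime", String.valueOf(nowMillis))
                .commit();               // stage the property change in the txn
            txn.commitTransaction();     // Iceberg writes a new metadata file
          }
        }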
    
    COMPUTE INCREMENTAL STATS without a partition clause is the same as
    plain COMPUTE STATS for Iceberg tables. This is aligned with the
    current behavior for non-partitioned tables:
    https://impala.apache.org/docs/build/html/topics/impala_compute_stats.html
    
    COMPUTE INCREMENTAL STATS .. PARTITION raises an error.
    
    DROP STATS has also been modified so that it does not drop table-level
    stats of HMS-integrated Iceberg tables.
    
    Testing:
     * added e2e tests for COMPUTE STATS
     * added e2e tests for DROP STATS
     * manually tested concurrent Hive INSERT and Impala COMPUTE STATS
       using latest Hive
     * opened IMPALA-11590 to add automated interop tests with Hive
    
    Change-Id: I46b6e0a5a65e18e5aaf2a007ec0242b28e0fed92
    Reviewed-on: http://gerrit.cloudera.org:8080/18995
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 .../apache/impala/analysis/ComputeStatsStmt.java   |   8 +
 .../apache/impala/service/CatalogOpExecutor.java   |  48 ++-
 .../queries/QueryTest/iceberg-compute-stats.test   | 392 +++++++++++++++++++++
 tests/query_test/test_iceberg.py                   |   3 +
 4 files changed, 448 insertions(+), 3 deletions(-)

diff --git a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
index 124315860..13f154256 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ComputeStatsStmt.java
@@ -413,6 +413,14 @@ public class ComputeStatsStmt extends StatementBase {
       isIncremental_ = false;
     }
 
+    if (table_ instanceof FeIcebergTable) {
+      if (partitionSet_ != null) {
+        throw new AnalysisException("COMPUTE INCREMENTAL ... PARTITION not supported " +
+            "for Iceberg table " + tableName_);
+      }
+      isIncremental_ = false;
+    }
+
     if (columnWhitelist_ != null) {
       validatedColumnWhitelist_ = new HashSet<>();
       for (String colName : columnWhitelist_) {
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 1be372d36..4771375f4 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -1663,9 +1663,13 @@ public class CatalogOpExecutor {
       Table.updateTimestampProperty(msTbl, HdfsTable.TBL_PROP_LAST_COMPUTE_STATS_TIME);
     }
 
-    // Apply property changes like numRows.
-    msTbl.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
-    applyAlterTable(msTbl, false, tblTxn);
+    if (IcebergTable.isIcebergTable(msTbl) && isIcebergHmsIntegrationEnabled(msTbl)) {
+      updateTableStatsViaIceberg((IcebergTable)table, msTbl);
+    } else {
+      // Apply property changes like numRows.
+      msTbl.getParameters().remove(StatsSetupConst.COLUMN_STATS_ACCURATE);
+      applyAlterTable(msTbl, false, tblTxn);
+    }
     numUpdatedPartitions.setRef(0L);
     if (modifiedParts != null) {
       numUpdatedPartitions.setRef((long) modifiedParts.size());
@@ -1674,6 +1678,37 @@ public class CatalogOpExecutor {
     }
   }
 
+  /**
+   * For Iceberg tables using HiveCatalog we must not update the HMS table directly, as
+   * that could overwrite concurrent modifications to the table. See IMPALA-11583.
+   * Table-level stats (numRows, totalSize) should not be set as Iceberg keeps them
+   * up-to-date.
+   * 'impala.lastComputeStatsTime' still needs to be set, so we know when
+   * COMPUTE STATS was last executed.
+   * We need to set catalog service id and catalog version to detect self-events.
+   */
+  private void updateTableStatsViaIceberg(IcebergTable iceTbl,
+      org.apache.hadoop.hive.metastore.api.Table msTbl) throws ImpalaException {
+    String CATALOG_SERVICE_ID = MetastoreEventPropertyKey.CATALOG_SERVICE_ID.getKey();
+    String CATALOG_VERSION    = MetastoreEventPropertyKey.CATALOG_VERSION.getKey();
+    String COMPUTE_STATS_TIME = HdfsTable.TBL_PROP_LAST_COMPUTE_STATS_TIME;
+
+    Preconditions.checkState(msTbl.getParameters().containsKey(CATALOG_SERVICE_ID));
+    Preconditions.checkState(msTbl.getParameters().containsKey(CATALOG_VERSION));
+
+    Map<String, String> props = new HashMap<>();
+    props.put(CATALOG_SERVICE_ID, msTbl.getParameters().get(CATALOG_SERVICE_ID));
+    props.put(CATALOG_VERSION,    msTbl.getParameters().get(CATALOG_VERSION));
+    if (msTbl.getParameters().containsKey(COMPUTE_STATS_TIME)) {
+      props.put(COMPUTE_STATS_TIME, msTbl.getParameters().get(COMPUTE_STATS_TIME));
+    }
+
+    org.apache.iceberg.Transaction iceTxn = IcebergUtil.getIcebergTransaction(iceTbl);
+    IcebergCatalogOpExecutor.setTblProperties(iceTxn, props);
+    iceTxn.commitTransaction();
+  }
+
+
   /**
    * Updates the row counts and incremental column stats of the partitions in the given
    * Impala table based on the given update stats parameters. Returns the modified Impala
@@ -2342,6 +2377,13 @@ public class CatalogOpExecutor {
     Preconditions.checkState(table.isWriteLockedByCurrentThread());
     // Delete the ROW_COUNT from the table (if it was set).
     org.apache.hadoop.hive.metastore.api.Table msTbl = table.getMetaStoreTable();
+    boolean isIntegratedIcebergTbl =
+        IcebergTable.isIcebergTable(msTbl) && isIcebergHmsIntegrationEnabled(msTbl);
+    if (isIntegratedIcebergTbl) {
+      // We shouldn't modify table-level stats of HMS-integrated Iceberg tables as these
+      // stats are managed by Iceberg.
+      return 0;
+    }
     int numTargetedPartitions = 0;
     boolean droppedRowCount =
         msTbl.getParameters().remove(StatsSetupConst.ROW_COUNT) != null;
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test
new file mode 100644
index 000000000..e10a39cd2
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test
@@ -0,0 +1,392 @@
+====
+---- QUERY
+create table ice_alltypes stored as iceberg
+as select * from functional_parquet.iceberg_alltypes_part;
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+# Table-level stats are automatically updated.
+# 'impala.lastComputeStatsTime' is not set yet.
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- RESULTS: VERIFY_IS_NOT_IN
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+compute stats ice_alltypes;
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',2,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+drop stats ice_alltypes;
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+alter table ice_alltypes unset tblproperties('impala.lastComputeStatsTime');
+---- RESULTS
+'Updated table.'
+---- TYPES
+STRING
+====
+---- QUERY
+# Table-level stats are not affected by DROP STATS.
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- RESULTS: VERIFY_IS_NOT_IN
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Column stats have been dropped.
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+compute incremental stats ice_alltypes;
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',2,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Setting 'numRows' has no effect on HMS-integrated Iceberg tables.
+ALTER TABLE ice_alltypes
+SET TBLPROPERTIES('numRows'='1000', 'STATS_GENERATED_VIA_STATS_TASK'='true');
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+'','numRows             ','2                   '
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Users can still set column stats manually as they are managed outside of Iceberg.
+ALTER TABLE ice_alltypes SET COLUMN STATS i ('numDVs'='100');
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',100,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+drop stats ice_alltypes;
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+# Table-level stats are not affected by DROP STATS.
+describe formatted ice_alltypes;
+---- RESULTS: VERIFY_IS_SUBSET
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Column stats have been dropped.
+show column stats ice_alltypes;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+create table ice_alltypes_ht stored as iceberg
+tblproperties ('iceberg.catalog'='hadoop.tables')
+as select * from functional_parquet.iceberg_alltypes_part;
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+# Table-level stats are not automatically updated for non-HMS integrated
+# Iceberg tables.
+# 'impala.lastComputeStatsTime' is not set yet.
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_NOT_IN
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+compute stats ice_alltypes_ht;
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',2,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_SUBSET
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+drop stats ice_alltypes_ht;
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+alter table ice_alltypes_ht unset tblproperties('impala.lastComputeStatsTime');
+---- RESULTS
+'Updated table.'
+---- TYPES
+STRING
+====
+---- QUERY
+# Table-level stats are dropped for non-HMS integrated Iceberg tables.
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_NOT_IN
+'','numRows             ','2                   '
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Column stats have been dropped.
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+compute incremental stats ice_alltypes_ht;
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',2,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_SUBSET
+row_regex:'','impala.lastComputeStatsTime','\d+\s+'
+'','numRows             ','2                   '
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Users can set 'numRows' for non-HMS integrated Iceberg tables.
+ALTER TABLE ice_alltypes_ht
+SET TBLPROPERTIES('numRows'='1000', 'STATS_GENERATED_VIA_STATS_TASK'='true');
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_SUBSET
+'','numRows             ','1000                '
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Users can set column stats manually.
+ALTER TABLE ice_alltypes_ht SET COLUMN STATS i ('numDVs'='100');
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',100,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+drop stats ice_alltypes_ht;
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+# Table-level stats are dropped for non-HMS integrated Iceberg tables.
+describe formatted ice_alltypes_ht;
+---- RESULTS: VERIFY_IS_NOT_IN
+'','numFiles            ','1                   '
+'','numRows             ','2                   '
+row_regex:'','totalSize           ','\d+\s+'
+---- TYPES
+STRING, STRING, STRING
+====
+---- QUERY
+# Column stats have been dropped.
+show column stats ice_alltypes_ht;
+---- RESULTS
+'i','INT',-1,-1,4,4,-1,-1
+'p_bool','BOOLEAN',-1,-1,1,1,-1,-1
+'p_int','INT',-1,-1,4,4,-1,-1
+'p_bigint','BIGINT',-1,-1,8,8,-1,-1
+'p_float','FLOAT',-1,-1,4,4,-1,-1
+'p_double','DOUBLE',-1,-1,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',-1,-1,4,4,-1,-1
+'p_date','DATE',-1,-1,4,4,-1,-1
+'p_string','STRING',-1,-1,-1,-1,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+# COMPUTE INCREMENTAL STATS without a partition clause falls back to plain
+# COMPUTE STATS.
+create table ice_alltypes_part
+partitioned by spec (i)
+stored as iceberg
+as select * from functional_parquet.iceberg_alltypes_part;
+COMPUTE INCREMENTAL STATS ice_alltypes_part;
+show column stats ice_alltypes_part;
+---- RESULTS
+'i','INT',2,0,4,4,-1,-1
+'p_bool','BOOLEAN',2,0,1,1,2,0
+'p_int','INT',1,0,4,4,-1,-1
+'p_bigint','BIGINT',1,0,8,8,-1,-1
+'p_float','FLOAT',1,0,4,4,-1,-1
+'p_double','DOUBLE',1,0,8,8,-1,-1
+'p_decimal','DECIMAL(6,3)',1,0,4,4,-1,-1
+'p_date','DATE',1,0,4,4,-1,-1
+'p_string','STRING',1,0,6,6,-1,-1
+---- TYPES
+STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
+====
+---- QUERY
+COMPUTE INCREMENTAL STATS ice_alltypes_part PARTITION (i=1);
+---- CATCH
+COMPUTE INCREMENTAL ... PARTITION not supported for Iceberg table
+====
diff --git a/tests/query_test/test_iceberg.py b/tests/query_test/test_iceberg.py
index 6ed0bd908..8ee20efa2 100644
--- a/tests/query_test/test_iceberg.py
+++ b/tests/query_test/test_iceberg.py
@@ -844,6 +844,9 @@ class TestIcebergTable(IcebergTestSuite):
 
     assert parquet_column_name_type_list == iceberg_column_name_type_list
 
+  def test_compute_stats(self, vector, unique_database):
+    self.run_test_case('QueryTest/iceberg-compute-stats', vector, unique_database)
+
 
 class TestIcebergV2Table(IcebergTestSuite):
   """Tests related to Iceberg V2 tables."""