Posted to commits@impala.apache.org by st...@apache.org on 2022/10/24 13:07:57 UTC

[impala] 03/03: IMPALA-11608: Fix SHOW TABLE STATS iceberg_tbl shows wrong number of files

This is an automated email from the ASF dual-hosted git repository.

stigahuang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 3973fc6d09dd1bc2abaae1e75e151f0f167f6602
Author: LPL <li...@sensorsdata.cn>
AuthorDate: Tue Oct 18 20:42:36 2022 +0800

    IMPALA-11608: Fix SHOW TABLE STATS iceberg_tbl shows wrong number of files
    
    Impala SHOW TABLE STATS outputs a wrong value for the number of files
    of an Iceberg table. It should count only the data files and delete
    files of the current snapshot, but instead it counts all files under
    the table directory, including metadata files, orphaned files, and old
    data files that no longer belong to the current snapshot.
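    
    For example, for an Iceberg V2 table whose current snapshot consists of
    one data file (3 rows) and one position-delete file, SHOW TABLE STATS
    should now report exactly two files. A sketch of the fixed output, with
    a hypothetical table name and sizes modeled on the test cases below:
    
        SHOW TABLE STATS ice_v2_tbl;
        -- #Rows  #Files  Size    ...
        -- 3      2       2.21KB  ...
    
    Before this fix, #Files also counted metadata files and stale data
    files under the table directory, so it could be much larger than the
    file count of the current snapshot.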
    
    Testing:
     - add e2e tests
    
    Change-Id: I110e5e13cec3aa898f115e1ed795ce98e68ef06c
    Reviewed-on: http://gerrit.cloudera.org:8080/19150
    Reviewed-by: Impala Public Jenkins <im...@cloudera.com>
    Tested-by: Impala Public Jenkins <im...@cloudera.com>
---
 common/thrift/CatalogObjects.thrift                |   1 +
 .../java/org/apache/impala/catalog/FeFsTable.java  |  20 +--
 .../org/apache/impala/catalog/FeIcebergTable.java  | 176 +++++++++++++++----
 .../java/org/apache/impala/service/Frontend.java   |  45 ++---
 .../queries/QueryTest/iceberg-compute-stats.test   |   9 +
 .../iceberg-v2-read-position-deletes-orc.test      | 138 ++++++++++++++-
 .../iceberg-v2-read-position-deletes.test          | 192 +++++++++++++++++++++
 7 files changed, 497 insertions(+), 84 deletions(-)

diff --git a/common/thrift/CatalogObjects.thrift b/common/thrift/CatalogObjects.thrift
index 6e3203d77..a217d65dc 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -578,6 +578,7 @@ struct TIcebergPartitionSpec {
 struct TIcebergPartitionStats {
   1: required i64 num_files;
   2: required i64 num_rows;
+  3: required i64 file_size_in_bytes;
 }
 
 struct TIcebergTable {
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
index f112ce576..9d1e59eb6 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeFsTable.java
@@ -440,7 +440,7 @@ public interface FeFsTable extends FeTable {
       result.setRows(new ArrayList<>());
 
       if (table instanceof FeIcebergTable) {
-        return getIcebergTableFiles((FeIcebergTable) table, result);
+        return FeIcebergTable.Utils.getIcebergTableFiles((FeIcebergTable) table, result);
       }
 
       List<? extends FeFsPartition> orderedPartitions;
@@ -466,24 +466,6 @@ public interface FeFsTable extends FeTable {
       return result;
     }
 
-    /**
-     * Get file info for the given fe iceberg table.
-     */
-    private static TResultSet getIcebergTableFiles(FeIcebergTable table,
-        TResultSet result) {
-      List<FileDescriptor> orderedFds = Lists
-          .newArrayList(table.getPathHashToFileDescMap().values());
-      Collections.sort(orderedFds);
-      for (FileDescriptor fd : orderedFds) {
-        TResultRowBuilder rowBuilder = new TResultRowBuilder();
-        rowBuilder.add(fd.getAbsolutePath(table.getLocation()));
-        rowBuilder.add(PrintUtils.printBytes(fd.getFileLength()));
-        rowBuilder.add("");
-        result.addToRows(rowBuilder.get());
-      }
-      return result;
-    }
-
     /**
      * Selects a random sample of files from the given list of partitions such that the
      * sum of file sizes is at least 'percentBytes' percent of the total number of bytes
diff --git a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
index 37b657b0c..72d56ddba 100644
--- a/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/FeIcebergTable.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.iceberg.ContentFile;
 import org.apache.iceberg.DataFile;
 import org.apache.iceberg.DeleteFile;
+import org.apache.iceberg.FileContent;
 import org.apache.iceberg.PartitionField;
 import org.apache.iceberg.PartitionSpec;
 import org.apache.iceberg.Schema;
@@ -59,11 +61,14 @@ import org.apache.impala.analysis.IcebergPartitionSpec;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.analysis.TimeTravelSpec;
 import org.apache.impala.analysis.TimeTravelSpec.Kind;
+import org.apache.impala.catalog.HdfsPartition.FileBlock;
 import org.apache.impala.catalog.HdfsPartition.FileDescriptor;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.Pair;
+import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.Reference;
 import org.apache.impala.compat.HdfsShim;
+import org.apache.impala.fb.FbFileBlock;
 import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.TCompressionCodec;
 import org.apache.impala.thrift.THdfsCompression;
@@ -77,6 +82,7 @@ import org.apache.impala.thrift.TIcebergTable;
 import org.apache.impala.thrift.TNetworkAddress;
 import org.apache.impala.thrift.TResultSet;
 import org.apache.impala.thrift.TResultSetMetadata;
+import org.apache.impala.util.HdfsCachingUtil;
 import org.apache.impala.util.IcebergUtil;
 import org.apache.impala.util.ListMap;
 import org.apache.impala.util.TResultRowBuilder;
@@ -95,7 +101,11 @@ public interface FeIcebergTable extends FeFsTable {
    */
   Map<String, HdfsPartition.FileDescriptor> getPathHashToFileDescMap();
 
+  /**
+   * Return the partition stats from the iceberg table.
+   */
   Map<String, TIcebergPartitionStats> getIcebergPartitionStats();
+
   /**
    * Return the hdfs table transformed from iceberg table
    */
@@ -313,8 +323,28 @@ public interface FeIcebergTable extends FeFsTable {
       return format == HdfsFileFormat.PARQUET || format == HdfsFileFormat.ORC;
     }
 
-    public static TResultSet getPartitionStats(FeIcebergTable table)
-        throws TableLoadingException {
+    /**
+     * Get file info for the given fe iceberg table.
+     */
+    public static TResultSet getIcebergTableFiles(FeIcebergTable table,
+        TResultSet result) {
+      List<FileDescriptor> orderedFds = Lists
+          .newArrayList(table.getPathHashToFileDescMap().values());
+      Collections.sort(orderedFds);
+      for (FileDescriptor fd : orderedFds) {
+        TResultRowBuilder rowBuilder = new TResultRowBuilder();
+        rowBuilder.add(fd.getAbsolutePath(table.getLocation()));
+        rowBuilder.add(PrintUtils.printBytes(fd.getFileLength()));
+        rowBuilder.add("");
+        result.addToRows(rowBuilder.get());
+      }
+      return result;
+    }
+
+    /**
+     * Get partition stats for the given fe iceberg table.
+     */
+    public static TResultSet getPartitionStats(FeIcebergTable table) {
       TResultSet result = new TResultSet();
       TResultSetMetadata resultSchema = new TResultSetMetadata();
       result.setSchema(resultSchema);
@@ -324,16 +354,9 @@ public interface FeIcebergTable extends FeFsTable {
       resultSchema.addToColumns(new TColumn("Number Of Rows", Type.BIGINT.toThrift()));
       resultSchema.addToColumns(new TColumn("Number Of Files", Type.BIGINT.toThrift()));
 
-      Map<String, TIcebergPartitionStats> fieldNameToPartitionValue =
-          table.getIcebergPartitionStats()
-              .entrySet()
-              .stream()
-              .sorted(Map.Entry.comparingByKey())
-              .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
-                  (oldValue, newValue) -> oldValue, LinkedHashMap::new));
-
-      for (Map.Entry<String, TIcebergPartitionStats> partitionInfo :
-          fieldNameToPartitionValue.entrySet()) {
+      Map<String, TIcebergPartitionStats> nameToStats = getOrderedPartitionStats(table);
+      for (Map.Entry<String, TIcebergPartitionStats> partitionInfo : nameToStats
+          .entrySet()) {
         TResultRowBuilder builder = new TResultRowBuilder();
         builder.add(partitionInfo.getKey());
         builder.add(partitionInfo.getValue().getNum_rows());
@@ -343,6 +366,76 @@ public interface FeIcebergTable extends FeFsTable {
       return result;
     }
 
+    /**
+     * Get partition stats for the given fe iceberg table ordered by partition name.
+     */
+    private static Map<String, TIcebergPartitionStats> getOrderedPartitionStats(
+        FeIcebergTable table) {
+      return table.getIcebergPartitionStats()
+          .entrySet()
+          .stream()
+          .sorted(Map.Entry.comparingByKey())
+          .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue,
+              (oldValue, newValue) -> oldValue, LinkedHashMap::new));
+    }
+
+    /**
+     * Get table stats for the given fe iceberg table.
+     */
+    public static TResultSet getTableStats(FeIcebergTable table) {
+      TResultSet result = new TResultSet();
+
+      TResultSetMetadata resultSchema = new TResultSetMetadata();
+      resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
+      resultSchema.addToColumns(new TColumn("#Files", Type.BIGINT.toThrift()));
+      resultSchema.addToColumns(new TColumn("Size", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Bytes Cached", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Cache Replication", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Format", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Incremental stats", Type.STRING.toThrift()));
+      resultSchema.addToColumns(new TColumn("Location", Type.STRING.toThrift()));
+      result.setSchema(resultSchema);
+
+      TResultRowBuilder rowBuilder = new TResultRowBuilder();
+      Map<String, TIcebergPartitionStats> nameToStats = table.getIcebergPartitionStats();
+      if (table.getNumRows() >= 0) {
+        rowBuilder.add(table.getNumRows());
+      } else {
+        rowBuilder.add(nameToStats.values().stream().mapToLong(
+            TIcebergPartitionStats::getNum_rows).sum());
+      }
+      rowBuilder.add(nameToStats.values().stream().mapToLong(
+          TIcebergPartitionStats::getNum_files).sum());
+      rowBuilder.addBytes(nameToStats.values().stream().mapToLong(
+          TIcebergPartitionStats::getFile_size_in_bytes).sum());
+      if (!table.isMarkedCached()) {
+        rowBuilder.add("NOT CACHED");
+        rowBuilder.add("NOT CACHED");
+      } else {
+        long cachedBytes = 0L;
+        for (FileDescriptor fd: table.getPathHashToFileDescMap().values()) {
+          int numBlocks = fd.getNumFileBlocks();
+          for (int i = 0; i < numBlocks; ++i) {
+            FbFileBlock block = fd.getFbFileBlock(i);
+            if (FileBlock.hasCachedReplica(block)) {
+              cachedBytes += FileBlock.getLength(block);
+            }
+          }
+        }
+        rowBuilder.addBytes(cachedBytes);
+
+        Short rep = HdfsCachingUtil
+            .getCachedCacheReplication(table.getMetaStoreTable().getParameters());
+        rowBuilder.add(rep.toString());
+      }
+      rowBuilder.add(table.getIcebergFileFormat().toString());
+      rowBuilder.add(Boolean.FALSE.toString());
+      rowBuilder.add(table.getLocation());
+      result.addToRows(rowBuilder.get());
+
+      return result;
+    }
+
     /**
      * Get the field schema list of the current PartitionSpec from Iceberg table.
      *
@@ -608,37 +701,52 @@ public interface FeIcebergTable extends FeFsTable {
      */
     public static Map<String, TIcebergPartitionStats> loadPartitionStats(
         IcebergTable table) throws TableLoadingException {
-      List<DataFile> dataFileList =
-          IcebergUtil
-              .getIcebergFiles(table, new ArrayList<>(), /*timeTravelSpecl=*/null)
-              .first;
-      Map<String, TIcebergPartitionStats> partitionStats = new HashMap<>();
-      for (DataFile dataFile : dataFileList) {
-        String partitionKey = getParitionKey(table, dataFile);
-        if (partitionStats.containsKey(partitionKey)) {
-          TIcebergPartitionStats info = partitionStats.get(partitionKey);
-          info.num_rows += dataFile.recordCount();
-          info.num_files += 1;
-          partitionStats.put(partitionKey, info);
-        } else {
-          TIcebergPartitionStats icebergPartitionStats =
-              new TIcebergPartitionStats();
-          icebergPartitionStats.num_rows = dataFile.recordCount();
-          icebergPartitionStats.num_files = 1;
-          partitionStats.put(partitionKey, icebergPartitionStats);
+      Pair<List<DataFile>, Set<DeleteFile>> icebergFiles = IcebergUtil
+          .getIcebergFiles(table, new ArrayList<>(), /*timeTravelSpec=*/null);
+      List<DataFile> dataFileList = icebergFiles.first;
+      Set<DeleteFile> deleteFileList = icebergFiles.second;
+      Map<String, TIcebergPartitionStats> nameToStats = new HashMap<>();
+      for (ContentFile<?> contentFile : Iterables.concat(dataFileList, deleteFileList)) {
+        String name = getPartitionKey(table, contentFile);
+        nameToStats.put(name, mergePartitionStats(nameToStats, contentFile, name));
+      }
+      return nameToStats;
+    }
+
+    /**
+     * Merge the stats of contentFile into partitionNameToStats under
+     * partitionName and return the result. num_rows only counts FileContent.DATA.
+     */
+    private static TIcebergPartitionStats mergePartitionStats(
+        Map<String, TIcebergPartitionStats> partitionNameToStats,
+        ContentFile<?> contentFile, String partitionName) {
+      TIcebergPartitionStats info;
+      if (partitionNameToStats.containsKey(partitionName)) {
+        info = partitionNameToStats.get(partitionName);
+        if (contentFile.content().equals(FileContent.DATA)) {
+          info.num_rows += contentFile.recordCount();
+        }
+        info.num_files += 1;
+        info.file_size_in_bytes += contentFile.fileSizeInBytes();
+      } else {
+        info = new TIcebergPartitionStats();
+        if (contentFile.content().equals(FileContent.DATA)) {
+          info.num_rows = contentFile.recordCount();
         }
+        info.num_files = 1;
+        info.file_size_in_bytes = contentFile.fileSizeInBytes();
       }
-      return partitionStats;
+      return info;
     }
 
     /**
     * Get the iceberg partition from a contentFile and wrap it into a json string
      */
-    public static String getParitionKey(IcebergTable table, DataFile dataFile) {
-      PartitionSpec spec = table.getIcebergApiTable().specs().get(dataFile.specId());
+    public static String getPartitionKey(IcebergTable table, ContentFile<?> contentFile) {
+      PartitionSpec spec = table.getIcebergApiTable().specs().get(contentFile.specId());
       Map<String, String> fieldNameToPartitionValue = new LinkedHashMap<>();
       for (int i = 0; i < spec.fields().size(); ++i) {
-        Object partValue = dataFile.partition().get(i, Object.class);
+        Object partValue = contentFile.partition().get(i, Object.class);
         String partValueString = null;
         if (partValue != null) {
           partValueString = partValue.toString();
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index eeb5b59ea..398f1e123 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -17,6 +17,16 @@
 
 package org.apache.impala.service;
 
+import static org.apache.impala.common.ByteUnits.MEGABYTE;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.base.Predicates;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.common.util.concurrent.Uninterruptibles;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -28,16 +38,15 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.ExecutionException;
 import java.util.stream.Collectors;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
@@ -47,9 +56,9 @@ import org.apache.hadoop.hive.metastore.api.LockLevel;
 import org.apache.hadoop.hive.metastore.api.LockType;
 import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
-import org.apache.iceberg.Table;
 import org.apache.iceberg.HistoryEntry;
 import org.apache.iceberg.Snapshot;
+import org.apache.iceberg.Table;
 import org.apache.impala.analysis.AlterDbStmt;
 import org.apache.impala.analysis.AnalysisContext;
 import org.apache.impala.analysis.AnalysisContext.AnalysisResult;
@@ -82,8 +91,8 @@ import org.apache.impala.analysis.TruncateStmt;
 import org.apache.impala.authentication.saml.ImpalaSamlClient;
 import org.apache.impala.authorization.AuthorizationChecker;
 import org.apache.impala.authorization.AuthorizationConfig;
-import org.apache.impala.authorization.AuthorizationManager;
 import org.apache.impala.authorization.AuthorizationFactory;
+import org.apache.impala.authorization.AuthorizationManager;
 import org.apache.impala.authorization.ImpalaInternalAdminUser;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.authorization.PrivilegeRequest;
@@ -104,7 +113,6 @@ import org.apache.impala.catalog.FeIcebergTable;
 import org.apache.impala.catalog.FeKuduTable;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.Function;
-import org.apache.impala.catalog.HdfsTable;
 import org.apache.impala.catalog.ImpaladCatalog;
 import org.apache.impala.catalog.ImpaladTableUsageTracker;
 import org.apache.impala.catalog.MaterializedViewHdfsTable;
@@ -112,7 +120,6 @@ import org.apache.impala.catalog.MetaStoreClientPool;
 import org.apache.impala.catalog.MetaStoreClientPool.MetaStoreClient;
 import org.apache.impala.catalog.TableLoadingException;
 import org.apache.impala.catalog.Type;
-import org.apache.impala.catalog.View;
 import org.apache.impala.catalog.local.InconsistentMetadataFetchException;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FileSystemUtil;
@@ -120,7 +127,6 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.KuduTransactionManager;
 import org.apache.impala.common.NotImplementedException;
-import org.apache.impala.common.Pair;
 import org.apache.impala.common.PrintUtils;
 import org.apache.impala.common.RuntimeEnv;
 import org.apache.impala.common.TransactionException;
@@ -143,8 +149,8 @@ import org.apache.impala.thrift.TClientRequest;
 import org.apache.impala.thrift.TColumn;
 import org.apache.impala.thrift.TColumnValue;
 import org.apache.impala.thrift.TCommentOnParams;
+import org.apache.impala.thrift.TCopyTestCaseReq;
 import org.apache.impala.thrift.TCreateDropRoleParams;
-import org.apache.impala.thrift.TDataSink;
 import org.apache.impala.thrift.TDdlExecRequest;
 import org.apache.impala.thrift.TDdlQueryOptions;
 import org.apache.impala.thrift.TDdlType;
@@ -161,12 +167,9 @@ import org.apache.impala.thrift.TGetTableHistoryResult;
 import org.apache.impala.thrift.TGetTableHistoryResultItem;
 import org.apache.impala.thrift.TGrantRevokePrivParams;
 import org.apache.impala.thrift.TGrantRevokeRoleParams;
-import org.apache.impala.thrift.THdfsTableSink;
-import org.apache.impala.thrift.TKuduTableSink;
 import org.apache.impala.thrift.TLineageGraph;
 import org.apache.impala.thrift.TLoadDataReq;
 import org.apache.impala.thrift.TLoadDataResp;
-import org.apache.impala.thrift.TCopyTestCaseReq;
 import org.apache.impala.thrift.TMetadataOpRequest;
 import org.apache.impala.thrift.TPlanExecInfo;
 import org.apache.impala.thrift.TPlanFragment;
@@ -182,7 +185,6 @@ import org.apache.impala.thrift.TShowFilesParams;
 import org.apache.impala.thrift.TShowStatsOp;
 import org.apache.impala.thrift.TStmtType;
 import org.apache.impala.thrift.TTableName;
-import org.apache.impala.thrift.TTableSink;
 import org.apache.impala.thrift.TTruncateParams;
 import org.apache.impala.thrift.TUniqueId;
 import org.apache.impala.thrift.TUpdateCatalogCacheRequest;
@@ -203,18 +205,6 @@ import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.base.Predicates;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.common.util.concurrent.Uninterruptibles;
-
-import static org.apache.impala.common.ByteUnits.MEGABYTE;
-import static org.apache.impala.common.ByteUnits.GIGABYTE;
-
 /**
  * Frontend API for the impalad process.
  * This class allows the impala daemon to create TQueryExecRequest
@@ -1443,6 +1433,9 @@ public class Frontend {
       if (table instanceof FeIcebergTable && op == TShowStatsOp.PARTITIONS) {
         return FeIcebergTable.Utils.getPartitionStats((FeIcebergTable) table);
       }
+      if (table instanceof FeIcebergTable && op == TShowStatsOp.TABLE_STATS) {
+        return FeIcebergTable.Utils.getTableStats((FeIcebergTable) table);
+      }
       return ((FeFsTable) table).getTableStats();
     } else if (table instanceof FeHBaseTable) {
       return ((FeHBaseTable) table).getTableStats();
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test
index e10a39cd2..e07d4f976 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-compute-stats.test
@@ -17,6 +17,15 @@ show column stats ice_alltypes;
 STRING, STRING, BIGINT, BIGINT, BIGINT, DOUBLE, BIGINT, BIGINT
 ====
 ---- QUERY
+show table stats ice_alltypes
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+2,1,'2.33KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/$DATABASE.db/ice_alltypes'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 # Table-level stats are automatically updated.
 # 'impala.lastComputeStatsTime' is not set yet.
 describe formatted ice_alltypes;
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
index 3fb363311..6953273a9 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes-orc.test
@@ -1,5 +1,37 @@
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_no_deletes_orc
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_no_deletes_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,1,'340B','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_no_deletes_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_no_deletes_orc
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_no_deletes_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,1,'340B','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_no_deletes_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_no_deletes_orc
 ---- RESULTS
 3
@@ -10,13 +42,77 @@ aggregation(SUM, NumRowGroups): 0
 aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
-SELECT count(*) from iceberg_v2_positional_delete_all_rows;
+COMPUTE STATS iceberg_v2_positional_delete_all_rows_orc
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_delete_all_rows_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+0,2,'1.62KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_delete_all_rows_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_positional_delete_all_rows_orc
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_delete_all_rows_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,2,'1.62KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_delete_all_rows_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+SELECT count(*) from iceberg_v2_positional_delete_all_rows_orc;
 ---- RESULTS
 0
 ---- TYPES
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+6,6,'3.97KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+10,6,'3.97KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files_orc
 ---- RESULTS
 6
@@ -24,6 +120,38 @@ SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files_
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_partitioned_position_deletes_orc
+---- RESULTS
+'Updated 1 partition(s) and 4 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_partitioned_position_deletes_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+10,6,'6.53KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_partitioned_position_deletes_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_partitioned_position_deletes_orc
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_partitioned_position_deletes_orc
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+20,6,'6.53KB','NOT CACHED','NOT CACHED','ORC','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_partitioned_position_deletes_orc'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_partitioned_position_deletes_orc
 ---- RESULTS
 10
@@ -38,7 +166,7 @@ SELECT count(*) from iceberg_v2_no_deletes_orc where i = 2;
 bigint
 ====
 ---- QUERY
-SELECT count(*) from iceberg_v2_positional_delete_all_rows where i > 2;
+SELECT count(*) from iceberg_v2_positional_delete_all_rows_orc where i > 2;
 ---- RESULTS
 0
 ---- TYPES
@@ -70,7 +198,7 @@ SELECT * from iceberg_v2_no_deletes_orc
 INT, STRING
 ====
 ---- QUERY
-SELECT * from iceberg_v2_positional_delete_all_rows;
+SELECT * from iceberg_v2_positional_delete_all_rows_orc;
 ---- RESULTS
 ---- TYPES
 INT, STRING
@@ -113,7 +241,7 @@ SELECT strright(upper(input__file__name),90), file__position + 1000, * from iceb
 STRING, BIGINT, INT, STRING
 ====
 ---- QUERY
-SELECT strright(upper(input__file__name),90), file__position + 1000, * from iceberg_v2_positional_delete_all_rows;
+SELECT strright(upper(input__file__name),90), file__position + 1000, * from iceberg_v2_positional_delete_all_rows_orc;
 ---- RESULTS
 ---- TYPES
 STRING, BIGINT, INT, STRING
@@ -154,7 +282,7 @@ SELECT * from iceberg_v2_no_deletes_orc where i = 2;
 INT, STRING
 ====
 ---- QUERY
-SELECT * from iceberg_v2_positional_delete_all_rows where i > 2;
+SELECT * from iceberg_v2_positional_delete_all_rows_orc where i > 2;
 ---- RESULTS
 ---- TYPES
 INT, STRING
diff --git a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
index a8d2fc63c..cde6eafd5 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-read-position-deletes.test
@@ -1,5 +1,37 @@
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_no_deletes
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_no_deletes
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,1,'625B','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_no_deletes'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_no_deletes
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_no_deletes
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,1,'625B','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_no_deletes'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_no_deletes
 ---- RESULTS
 3
@@ -10,6 +42,38 @@ aggregation(SUM, NumRowGroups): 0
 aggregation(SUM, NumFileMetadataRead): 0
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_delete_positional
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_delete_positional
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+2,2,'2.21KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_delete_positional'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_delete_positional
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_delete_positional
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,2,'2.21KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_delete_positional'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_delete_positional;
 ---- RESULTS
 2
@@ -17,6 +81,38 @@ SELECT count(*) from iceberg_v2_delete_positional;
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_positional_delete_all_rows
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_delete_all_rows
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+0,2,'3.21KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_delete_all_rows'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_positional_delete_all_rows
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_delete_all_rows
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,2,'3.21KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_delete_all_rows'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_delete_all_rows;
 ---- RESULTS
 0
@@ -24,6 +120,38 @@ SELECT count(*) from iceberg_v2_positional_delete_all_rows;
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_positional_not_all_data_files_have_delete_files
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+6,6,'7.77KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_positional_not_all_data_files_have_delete_files
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_not_all_data_files_have_delete_files
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+10,6,'7.77KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_not_all_data_files_have_delete_files'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files
 ---- RESULTS
 6
@@ -31,6 +159,38 @@ SELECT count(*) from iceberg_v2_positional_not_all_data_files_have_delete_files
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_positional_update_all_rows
+---- RESULTS
+'Updated 1 partition(s) and 2 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_update_all_rows
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+3,3,'3.82KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_update_all_rows'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_positional_update_all_rows
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_positional_update_all_rows
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+6,3,'3.82KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_positional_update_all_rows'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_positional_update_all_rows
 ---- RESULTS
 3
@@ -38,6 +198,38 @@ SELECT count(*) from iceberg_v2_positional_update_all_rows
 bigint
 ====
 ---- QUERY
+COMPUTE STATS iceberg_v2_partitioned_position_deletes
+---- RESULTS
+'Updated 1 partition(s) and 4 column(s).'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_partitioned_position_deletes
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+10,6,'12.95KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_partitioned_position_deletes'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
+DROP STATS iceberg_v2_partitioned_position_deletes
+---- RESULTS
+'Stats have been dropped.'
+---- TYPES
+STRING
+====
+---- QUERY
+SHOW TABLE STATS iceberg_v2_partitioned_position_deletes
+---- LABELS
+#ROWS, #Files, Size, Bytes Cached, Cache Replication, Format, Incremental stats, Location
+---- RESULTS: VERIFY_IS_EQUAL
+20,6,'12.95KB','NOT CACHED','NOT CACHED','PARQUET','false','$NAMENODE/test-warehouse/iceberg_test/hadoop_catalog/ice/iceberg_v2_partitioned_position_deletes'
+---- TYPES
+BIGINT,BIGINT,STRING,STRING,STRING,STRING,STRING,STRING
+====
+---- QUERY
 SELECT count(*) from iceberg_v2_partitioned_position_deletes
 ---- RESULTS
 10