Posted to commits@impala.apache.org by ta...@apache.org on 2019/06/26 17:34:07 UTC

[impala] branch master updated (572c127 -> e158352)

This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git.


    from 572c127  IMPALA-8698: Disable row count estimate to avoid a flaky test
     new d750d88  Revert "IMPALA-7322: Add storage wait time to profile"
     new e158352  Revert "IMPALA-8627: re-enable catalog v2 in containers"

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 common/thrift/CatalogObjects.thrift                |  4 --
 docker/catalogd/Dockerfile                         |  2 +-
 docker/coord_exec/Dockerfile                       |  3 +-
 docker/coordinator/Dockerfile                      |  3 +-
 .../apache/impala/analysis/StmtMetadataLoader.java | 22 +------
 .../java/org/apache/impala/catalog/HBaseTable.java | 17 ++----
 .../java/org/apache/impala/catalog/HdfsTable.java  | 67 ++++++----------------
 .../java/org/apache/impala/catalog/KuduTable.java  |  9 +--
 .../main/java/org/apache/impala/catalog/Table.java | 12 ----
 tests/query_test/test_observability.py             | 29 ----------
 10 files changed, 32 insertions(+), 136 deletions(-)


[impala] 02/02: Revert "IMPALA-8627: re-enable catalog v2 in containers"


tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit e158352fe2f0c911c7bd60caae90673f8fc6fde0
Author: Tim Armstrong <ta...@cloudera.com>
AuthorDate: Wed Jun 26 10:06:53 2019 -0700

    Revert "IMPALA-8627: re-enable catalog v2 in containers"
    
    This reverts commit 1e1b8e9bc6418fecfe4e57cb4fe25d1f64129657.
    
    Some tests appear to be flaky as a result of this change.
    
    Change-Id: I5037c94d22101458f0c6fffa976f0ee73f5f9455
    Reviewed-on: http://gerrit.cloudera.org:8080/13739
    Reviewed-by: Tim Armstrong <ta...@cloudera.com>
    Tested-by: Tim Armstrong <ta...@cloudera.com>
---
 docker/catalogd/Dockerfile    | 2 +-
 docker/coord_exec/Dockerfile  | 3 +--
 docker/coordinator/Dockerfile | 3 +--
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/docker/catalogd/Dockerfile b/docker/catalogd/Dockerfile
index 11a7ed7..6989dec 100644
--- a/docker/catalogd/Dockerfile
+++ b/docker/catalogd/Dockerfile
@@ -24,5 +24,5 @@ EXPOSE 25020
 ENTRYPOINT ["/opt/impala/bin/daemon_entrypoint.sh", "/opt/impala/bin/catalogd",\
      "-log_dir=/opt/impala/logs",\
      "-abort_on_config_error=false", "-state_store_host=statestored",\
-     "-catalog_topic_mode=minimal", "-hms_event_polling_interval_s=1",\
+     "-catalog_topic_mode=full", "-hms_event_polling_interval_s=0",\
      "-invalidate_tables_on_memory_pressure=true"]
diff --git a/docker/coord_exec/Dockerfile b/docker/coord_exec/Dockerfile
index 4c83976..271fee5 100644
--- a/docker/coord_exec/Dockerfile
+++ b/docker/coord_exec/Dockerfile
@@ -31,5 +31,4 @@ ENTRYPOINT ["/opt/impala/bin/daemon_entrypoint.sh", "/opt/impala/bin/impalad",\
      "-log_dir=/opt/impala/logs",\
      "-abort_on_config_error=false", "-state_store_host=statestored",\
      "-catalog_service_host=catalogd", "-mem_limit_includes_jvm=true",\
-     "-use_local_catalog=true", "-invalidate_tables_on_memory_pressure=true",\
-     "--rpc_use_loopback=true"]
+     "-use_local_catalog=false", "--rpc_use_loopback=true"]
diff --git a/docker/coordinator/Dockerfile b/docker/coordinator/Dockerfile
index 54fcc5b..8251659 100644
--- a/docker/coordinator/Dockerfile
+++ b/docker/coordinator/Dockerfile
@@ -31,6 +31,5 @@ ENTRYPOINT ["/opt/impala/bin/daemon_entrypoint.sh", "/opt/impala/bin/impalad",\
      "-log_dir=/opt/impala/logs",\
      "-abort_on_config_error=false", "-state_store_host=statestored",\
      "-catalog_service_host=catalogd", "-is_executor=false", \
-     "-mem_limit_includes_jvm=true", \
-     "-use_local_catalog=true", "-invalidate_tables_on_memory_pressure=true",\
+     "-mem_limit_includes_jvm=true", "-use_local_catalog=false", \
      "--rpc_use_loopback=true"]


[impala] 01/02: Revert "IMPALA-7322: Add storage wait time to profile"


tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit d750d884e5fc28117c051ba29b73cdc51e698cea
Author: Tim Armstrong <ta...@cloudera.com>
AuthorDate: Wed Jun 26 10:06:40 2019 -0700

    Revert "IMPALA-7322: Add storage wait time to profile"
    
    This reverts commit 2fd795cf56e65a43087375867dcc9890e3a27330.
    
    The test added has some issues:
    * Fails with the local catalog enabled
    * Is flaky if run concurrently with other tests that touch the same
      tables.
    
    Change-Id: I8fc33db75c21973d209d518c1fb02bd5f9728aee
    Reviewed-on: http://gerrit.cloudera.org:8080/13738
    Reviewed-by: Tim Armstrong <ta...@cloudera.com>
    Tested-by: Tim Armstrong <ta...@cloudera.com>
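
For context, the reverted change timed storage accesses with the Codahale
Metrics API visible in the diffs below and reported the total in the query
profile. A minimal self-contained sketch of that pattern, assuming only the
codahale-metrics dependency (the class and accessStorage() here are
hypothetical; only the Timer/Clock usage mirrors the diffs):

    import java.util.concurrent.TimeUnit;
    import com.codahale.metrics.Clock;
    import com.codahale.metrics.MetricRegistry;
    import com.codahale.metrics.Timer;

    public class StorageLoadTimingSketch {
        private static final MetricRegistry metrics = new MetricRegistry();

        public static void main(String[] args) throws Exception {
            // Pattern from HBaseTable/KuduTable.load(): wrap the storage
            // access in a Timer.Context; stop() returns elapsed nanoseconds.
            Timer.Context ctx =
                metrics.timer("storage-metadata-load-duration").time();
            long storageMetadataLoadTime;  // nanoseconds, as in the diffs
            try {
                accessStorage();  // hypothetical stand-in for the real call
            } finally {
                storageMetadataLoadTime = ctx.stop();
            }

            // Pattern from HdfsTable.loadAllPartitions(): manual Clock ticks.
            Clock clock = Clock.defaultClock();
            long start = clock.getTick();
            accessStorage();
            storageMetadataLoadTime += clock.getTick() - start;

            // The removed profile line converted the total to milliseconds.
            System.out.printf("storage-load-time=%dms%n",
                TimeUnit.MILLISECONDS.convert(
                    storageMetadataLoadTime, TimeUnit.NANOSECONDS));
        }

        private static void accessStorage() throws InterruptedException {
            Thread.sleep(5);  // placeholder for filesystem/HBase/Kudu I/O
        }
    }

The accumulated value flowed to coordinators through the TTable field
storage_metadata_load_time, which this revert also removes from the Thrift
definition below.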
---
 common/thrift/CatalogObjects.thrift                |  4 --
 .../apache/impala/analysis/StmtMetadataLoader.java | 22 +------
 .../java/org/apache/impala/catalog/HBaseTable.java | 17 ++----
 .../java/org/apache/impala/catalog/HdfsTable.java  | 67 ++++++----------------
 .../java/org/apache/impala/catalog/KuduTable.java  |  9 +--
 .../main/java/org/apache/impala/catalog/Table.java | 12 ----
 tests/query_test/test_observability.py             | 29 ----------
 7 files changed, 29 insertions(+), 131 deletions(-)

diff --git a/common/thrift/CatalogObjects.thrift b/common/thrift/CatalogObjects.thrift
index 1152ecf..01bba1d 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -490,10 +490,6 @@ struct TTable {
   // For example ValidReaderWriteIdList object's format is:
   // <table_name>:<highwatermark>:<minOpenWriteId>:<open_writeids>:<abort_writeids>
   14: optional string valid_write_ids
-
-  // Set if this table needs storage access during metadata load.
-  // Time used for storage loading in nanoseconds.
-  15: optional i64 storage_metadata_load_time
 }
 
 // Represents a database.
diff --git a/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java b/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
index bfe041b..fd11af1 100644
--- a/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
+++ b/fe/src/main/java/org/apache/impala/analysis/StmtMetadataLoader.java
@@ -18,7 +18,6 @@
 package org.apache.impala.analysis;
 
 import java.util.ArrayList;
-import java.util.concurrent.TimeUnit;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -29,7 +28,6 @@ import org.apache.impala.catalog.FeCatalog;
 import org.apache.impala.catalog.FeDb;
 import org.apache.impala.catalog.FeTable;
 import org.apache.impala.catalog.FeView;
-import org.apache.impala.catalog.Table;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.service.Frontend;
@@ -226,25 +224,11 @@ public class StmtMetadataLoader {
       missingTbls = newMissingTbls;
       ++numCatalogUpdatesReceived_;
     }
-
     if (timeline_ != null) {
-      long storageLoadTimeNano = 0;
-      // Calculate the total storage loading time for this query (not including
-      // the tables already loaded before the query was called).
-      storageLoadTimeNano =
-          loadedTbls_.values()
-              .stream()
-              .filter(Table.class::isInstance)
-              .map(Table.class::cast)
-              .filter(loadedTbl -> requestedTbls.contains(loadedTbl.getTableName()))
-              .mapToLong(Table::getStorageLoadTime)
-              .sum();
-      timeline_.markEvent(String.format("Metadata load finished. "
-              + "loaded-tables=%d/%d load-requests=%d catalog-updates=%d "
-              + "storage-load-time=%dms",
+      timeline_.markEvent(String.format("Metadata load finished. " +
+          "loaded-tables=%d/%d load-requests=%d catalog-updates=%d",
           requestedTbls.size(), loadedTbls_.size(), numLoadRequestsSent_,
-          numCatalogUpdatesReceived_,
-          TimeUnit.MILLISECONDS.convert(storageLoadTimeNano, TimeUnit.NANOSECONDS)));
+          numCatalogUpdatesReceived_));
 
       if (MetastoreShim.getMajorVersion() > 2) {
         StringBuilder validIdsBuf = new StringBuilder("Loaded ValidWriteIdLists: ");
diff --git a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
index 6810be9..d75afa7 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HBaseTable.java
@@ -103,18 +103,11 @@ public class HBaseTable extends Table implements FeHBaseTable {
     Preconditions.checkNotNull(getMetaStoreTable());
     try (Timer.Context timer = getMetrics().getTimer(Table.LOAD_DURATION_METRIC).time()) {
       msTable_ = msTbl;
-      final Timer.Context storageLoadTimer =
-          getMetrics().getTimer(Table.STORAGE_METADATA_LOAD_DURATION_METRIC).time();
-      List<Column> cols;
-      try {
-        hbaseTableName_ = Util.getHBaseTableName(getMetaStoreTable());
-        // Warm up the connection and verify the table exists.
-        Util.getHBaseTable(hbaseTableName_).close();
-        columnFamilies_ = null;
-        cols = Util.loadColumns(msTable_);
-      } finally {
-        storageMetadataLoadTime_ = storageLoadTimer.stop();
-      }
+      hbaseTableName_ = Util.getHBaseTableName(getMetaStoreTable());
+      // Warm up the connection and verify the table exists.
+      Util.getHBaseTable(hbaseTableName_).close();
+      columnFamilies_ = null;
+      List<Column> cols = Util.loadColumns(msTable_);
       clearColumns();
       for (Column col : cols) addColumn(col);
       // Set table stats.
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
index 08ab6e5..779a96e 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsTable.java
@@ -31,7 +31,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeMap;
-import java.util.concurrent.TimeUnit;
 
 import org.apache.avro.Schema;
 import org.apache.hadoop.conf.Configuration;
@@ -88,7 +87,6 @@ import org.apache.impala.util.ThreadNameAnnotator;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.codahale.metrics.Clock;
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.Timer;
 import com.google.common.annotations.VisibleForTesting;
@@ -506,18 +504,16 @@ public class HdfsTable extends Table implements FeFsTable {
    * Create HdfsPartition objects corresponding to 'msPartitions' and add them to this
    * table's partition list. Any partition metadata will be reset and loaded from
    * scratch. For each partition created, we load the block metadata for each data file
-   * under it. Returns time spent loading the filesystem metadata in nanoseconds.
+   * under it.
    *
    * If there are no partitions in the Hive metadata, a single partition is added with no
    * partition keys.
    */
-  private long loadAllPartitions(
+  private void loadAllPartitions(
       List<org.apache.hadoop.hive.metastore.api.Partition> msPartitions,
       org.apache.hadoop.hive.metastore.api.Table msTbl) throws IOException,
       CatalogException {
     Preconditions.checkNotNull(msTbl);
-    final Clock clock = Clock.defaultClock();
-    long startTime = clock.getTick();
     initializePartitionMetadata(msTbl);
     FsPermissionCache permCache = preloadPermissionsCache(msPartitions);
 
@@ -545,7 +541,6 @@ public class HdfsTable extends Table implements FeFsTable {
     }
     // Load the file metadata from scratch.
     loadFileMetadataForPartitions(partitionMap_.values(), /*isRefresh=*/false);
-    return clock.getTick() - startTime;
   }
 
 
@@ -931,10 +926,7 @@ public class HdfsTable extends Table implements FeFsTable {
         loadTableSchema ? "table definition and " : "",
         partitionsToUpdate == null ? "all" : String.valueOf(partitionsToUpdate.size()),
         msTbl.getDbName(), msTbl.getTableName(), reason);
-    LOG.info(annotation);
-    final Timer storageLdTimer =
-        getMetrics().getTimer(Table.STORAGE_METADATA_LOAD_DURATION_METRIC);
-    storageMetadataLoadTime_ = 0;
+    LOG.info(annotation);
     try (ThreadNameAnnotator tna = new ThreadNameAnnotator(annotation)) {
       // turn all exceptions into TableLoadingException
       msTable_ = msTbl;
@@ -948,19 +940,16 @@ public class HdfsTable extends Table implements FeFsTable {
             //TODO writeIDs may also be loaded in other code paths.
             loadValidWriteIdList(client);
         }
-
         // Load partition and file metadata
         if (reuseMetadata) {
           // Incrementally update this table's partitions and file metadata
           Preconditions.checkState(
               partitionsToUpdate == null || loadParitionFileMetadata);
-          storageMetadataLoadTime_ += updateMdFromHmsTable(msTbl);
+          updateMdFromHmsTable(msTbl);
           if (msTbl.getPartitionKeysSize() == 0) {
-            if (loadParitionFileMetadata) {
-              storageMetadataLoadTime_ += updateUnpartitionedTableFileMd();
-            }
+            if (loadParitionFileMetadata) updateUnpartitionedTableFileMd();
           } else {
-            storageMetadataLoadTime_ += updatePartitionsFromHms(
+            updatePartitionsFromHms(
                 client, partitionsToUpdate, loadParitionFileMetadata);
           }
           LOG.info("Incrementally loaded table metadata for: " + getFullName());
@@ -971,7 +960,7 @@ public class HdfsTable extends Table implements FeFsTable {
               MetaStoreUtil.fetchAllPartitions(
                   client, db_.getName(), name_, NUM_PARTITION_FETCH_RETRIES);
           LOG.info("Fetched partition metadata from the Metastore: " + getFullName());
-          storageMetadataLoadTime_ = loadAllPartitions(msPartitions, msTbl);
+          loadAllPartitions(msPartitions, msTbl);
         }
         if (loadTableSchema) setAvroSchema(client, msTbl);
         setTableStats(msTbl);
@@ -984,47 +973,37 @@ public class HdfsTable extends Table implements FeFsTable {
             + getFullName(), e);
       }
     } finally {
-      storageLdTimer.update(storageMetadataLoadTime_, TimeUnit.NANOSECONDS);
       context.stop();
     }
   }
 
   /**
    * Updates the table metadata, including 'hdfsBaseDir_', 'isMarkedCached_',
-   * and 'accessLevel_' from 'msTbl'. Returns time spent accessing file system
-   * in nanoseconds. Throws an IOException if there was an error accessing
-   * the table location path.
+   * and 'accessLevel_' from 'msTbl'. Throws an IOException if there was an error
+   * accessing the table location path.
    */
-  private long  updateMdFromHmsTable(org.apache.hadoop.hive.metastore.api.Table msTbl)
+  private void updateMdFromHmsTable(org.apache.hadoop.hive.metastore.api.Table msTbl)
       throws IOException {
     Preconditions.checkNotNull(msTbl);
-    final Clock clock = Clock.defaultClock();
-    long filesystemAccessTime = 0;
-    long startTime = clock.getTick();
     hdfsBaseDir_ = msTbl.getSd().getLocation();
     isMarkedCached_ = HdfsCachingUtil.validateCacheParams(msTbl.getParameters());
     Path location = new Path(hdfsBaseDir_);
     accessLevel_ = getAvailableAccessLevel(getFullName(), location,
         new FsPermissionCache());
-    filesystemAccessTime = clock.getTick() - startTime;
     setMetaStoreTable(msTbl);
-    return filesystemAccessTime;
   }
 
   /**
    * Incrementally updates the file metadata of an unpartitioned HdfsTable.
-   * Returns time spent updating the file metadata in nanoseconds.
    *
    * This is optimized for the case where few files have changed. See
    * {@link #refreshFileMetadata(Path, List)} above for details.
    */
-  private long updateUnpartitionedTableFileMd() throws CatalogException {
+  private void updateUnpartitionedTableFileMd() throws CatalogException {
     Preconditions.checkState(getNumClusteringCols() == 0);
     if (LOG.isTraceEnabled()) {
       LOG.trace("update unpartitioned table: " + getFullName());
     }
-    final Clock clock = Clock.defaultClock();
-    long startTime = clock.getTick();
     HdfsPartition oldPartition = Iterables.getOnlyElement(partitionMap_.values());
 
     // Instead of updating the existing partition in place, we create a new one
@@ -1042,21 +1021,18 @@ public class HdfsTable extends Table implements FeFsTable {
     addPartition(part);
     if (isMarkedCached_) part.markCached();
     loadFileMetadataForPartitions(ImmutableList.of(part), /*isRefresh=*/true);
-    return clock.getTick() - startTime;
   }
 
   /**
-   * Updates the partitions of an HdfsTable so that they are in sync with the
-   * Hive Metastore. It reloads partitions that were marked 'dirty' by doing a
-   * DROP + CREATE. It removes from this table partitions that no longer exist
-   * in the Hive Metastore and adds partitions that were added externally (e.g.
-   * using Hive) to the Hive Metastore but do not exist in this table. If
-   * 'loadParitionFileMetadata' is true, it triggers file/block metadata reload
-   * for the partitions specified in 'partitionsToUpdate', if any, or for all
-   * the table partitions if 'partitionsToUpdate' is null. Returns time
-   * spent loading file metadata in nanoseconds.
+   * Updates the partitions of an HdfsTable so that they are in sync with the Hive
+   * Metastore. It reloads partitions that were marked 'dirty' by doing a DROP + CREATE.
+   * It removes from this table partitions that no longer exist in the Hive Metastore and
+   * adds partitions that were added externally (e.g. using Hive) to the Hive Metastore
+   * but do not exist in this table. If 'loadParitionFileMetadata' is true, it triggers
+   * file/block metadata reload for the partitions specified in 'partitionsToUpdate', if
+   * any, or for all the table partitions if 'partitionsToUpdate' is null.
    */
-  private long updatePartitionsFromHms(IMetaStoreClient client,
+  private void updatePartitionsFromHms(IMetaStoreClient client,
       Set<String> partitionsToUpdate, boolean loadPartitionFileMetadata)
       throws Exception {
     if (LOG.isTraceEnabled()) LOG.trace("Sync table partitions: " + getFullName());
@@ -1120,19 +1096,14 @@ public class HdfsTable extends Table implements FeFsTable {
     // Load file metadata. Until we have a notification mechanism for when a
     // file changes in hdfs, it is sometimes required to reload all the file
     // descriptors and block metadata of a table (e.g. REFRESH statement).
-    long fileLoadMdTime = 0;
     if (loadPartitionFileMetadata) {
-      final Clock clock = Clock.defaultClock();
-      long startTime = clock.getTick();
       if (partitionsToUpdate != null) {
         Preconditions.checkState(partitionsToLoadFiles.isEmpty());
         // Only reload file metadata of partitions specified in 'partitionsToUpdate'
         partitionsToLoadFiles = getPartitionsForNames(partitionsToUpdate);
       }
       loadFileMetadataForPartitions(partitionsToLoadFiles, /* isRefresh=*/true);
-      fileLoadMdTime = clock.getTick() - startTime;
     }
-    return fileLoadMdTime;
   }
 
   /**
diff --git a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
index 21feda7..338c979 100644
--- a/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
+++ b/fe/src/main/java/org/apache/impala/catalog/KuduTable.java
@@ -310,19 +310,14 @@ public class KuduTable extends Table implements FeKuduTable {
             " property found for Kudu table " + kuduTableName_);
       }
       setTableStats(msTable_);
-      // Load metadata from Kudu
-      final Timer.Context ctxStorageLdTime =
-          getMetrics().getTimer(Table.STORAGE_METADATA_LOAD_DURATION_METRIC).time();
+      // Load metadata from Kudu and HMS
       try {
         loadSchemaFromKudu();
+        loadAllColumnStats(msClient);
       } catch (ImpalaRuntimeException e) {
         throw new TableLoadingException("Error loading metadata for Kudu table " +
             kuduTableName_, e);
-      } finally {
-        storageMetadataLoadTime_ = ctxStorageLdTime.stop();
       }
-      // Load from HMS
-      loadAllColumnStats(msClient);
       refreshLastUsedTime();
       // Avoid updating HMS if the schema didn't change.
       if (msTable_.equals(msTbl)) return;
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index acab108..b0264c2 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -115,9 +115,6 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
   // True if this object is stored in an Impalad catalog cache.
   protected boolean storedInImpaladCatalogCache_ = false;
 
-  // Time spent in the source systems loading/reloading the fs metadata for the table.
-  protected long storageMetadataLoadTime_ = 0;
-
   // Last used time of this table in nanoseconds as returned by
   // CatalogdTableInvalidator.nanoTime(). This is only set in catalogd and not used by
   // impalad.
@@ -141,8 +138,6 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
   public static final String REFRESH_DURATION_METRIC = "refresh-duration";
   public static final String ALTER_DURATION_METRIC = "alter-duration";
   public static final String LOAD_DURATION_METRIC = "load-duration";
-  public static final String STORAGE_METADATA_LOAD_DURATION_METRIC =
-      "storage-metadata-load-duration";
 
   // Table property key for storing the time of the last DDL operation.
   public static final String TBL_PROP_LAST_DDL_TIME = "transient_lastDdlTime";
@@ -202,14 +197,10 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
     metrics_.addTimer(REFRESH_DURATION_METRIC);
     metrics_.addTimer(ALTER_DURATION_METRIC);
     metrics_.addTimer(LOAD_DURATION_METRIC);
-    metrics_.addTimer(STORAGE_METADATA_LOAD_DURATION_METRIC);
   }
 
   public Metrics getMetrics() { return metrics_; }
 
-  // Returns storage wait time during metadata load.
-  public long getStorageLoadTime() { return storageMetadataLoadTime_; }
-
   // Returns true if this table reference comes from the impalad catalog cache or if it
   // is loaded from the testing framework. Returns false if this table reference points
   // to a table stored in the catalog server.
@@ -408,8 +399,6 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
     accessLevel_ = thriftTable.isSetAccess_level() ? thriftTable.getAccess_level() :
         TAccessLevel.READ_WRITE;
 
-    storageMetadataLoadTime_ = thriftTable.getStorage_metadata_load_time();
-
     storedInImpaladCatalogCache_ = true;
     validWriteIds_ = thriftTable.isSetValid_write_ids() ?
         thriftTable.getValid_write_ids() : null;
@@ -446,7 +435,6 @@ public abstract class Table extends CatalogObjectImpl implements FeTable {
 
     TTable table = new TTable(db_.getName(), name_);
     table.setAccess_level(accessLevel_);
-    table.setStorage_metadata_load_time(storageMetadataLoadTime_);
 
     // Populate both regular columns and clustering columns (if there are any).
     table.setColumns(new ArrayList<>());
diff --git a/tests/query_test/test_observability.py b/tests/query_test/test_observability.py
index f1a9526..6f397df 100644
--- a/tests/query_test/test_observability.py
+++ b/tests/query_test/test_observability.py
@@ -585,32 +585,3 @@ class TestObservability(ImpalaTestSuite):
     query = "select count (*) from functional.alltypes"
     runtime_profile = self.execute_query(query).runtime_profile
     self.__verify_profile_event_sequence(event_regexes, runtime_profile)
-
-  def test_query_profile_storage_load_time_filesystem(self):
-    """Test that when a query needs load metadata for table(s), the
-    storage load time should be in the profile. Tests file systems."""
-    self.__check_query_profile_storage_load_time("functional")
-
-  @SkipIfS3.hbase
-  @SkipIfLocal.hbase
-  @SkipIfIsilon.hbase
-  @SkipIfABFS.hbase
-  @SkipIfADLS.hbase
-  def test_query_profile_storage_load_time(self):
-    """Test that when a query needs load metadata for table(s), the
-    storage load time should be in the profile. Tests kudu and hbase."""
-    # KUDU table
-    self.__check_query_profile_storage_load_time("functional_kudu")
-
-    # HBASE table
-    self.__check_query_profile_storage_load_time("functional_hbase")
-
-  def __check_query_profile_storage_load_time(self, db_name):
-    """Check query profile for storage load time with a given database."""
-    self.execute_query("invalidate metadata {0}.alltypes".format(db_name))
-    query = "select count (*) from {0}.alltypes".format(db_name)
-    runtime_profile = self.execute_query(query).runtime_profile
-    assert "storage-load-time" in runtime_profile
-    # Call the second time, no metastore loading needed.
-    runtime_profile = self.execute_query(query).runtime_profile
-    assert "storage-load-time" not in runtime_profile
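
The other half of the reverted feature, visible in the StmtMetadataLoader.java
diff above, summed the per-table nanosecond counters for only the tables the
query requested. A standalone sketch of that stream aggregation (TableStub and
the sample values are hypothetical; the stream shape and the millisecond
conversion mirror the removed code):

    import java.util.List;
    import java.util.Set;
    import java.util.concurrent.TimeUnit;

    public class LoadTimeSumSketch {
        // Hypothetical stand-in for org.apache.impala.catalog.Table.
        record TableStub(String tableName, long storageLoadTimeNanos) {}

        public static void main(String[] args) {
            List<TableStub> loadedTbls = List.of(
                new TableStub("functional.alltypes", 7_000_000L),
                new TableStub("functional_kudu.alltypes", 3_000_000L));
            // Only tables the current query asked for are counted, so
            // tables loaded before the query do not inflate the number.
            Set<String> requestedTbls = Set.of("functional.alltypes");

            long storageLoadTimeNano = loadedTbls.stream()
                .filter(t -> requestedTbls.contains(t.tableName()))
                .mapToLong(TableStub::storageLoadTimeNanos)
                .sum();

            // Matches the removed profile event's unit conversion.
            System.out.printf(
                "Metadata load finished. storage-load-time=%dms%n",
                TimeUnit.MILLISECONDS.convert(storageLoadTimeNano,
                    TimeUnit.NANOSECONDS));
        }
    }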