Posted to commits@impala.apache.org by jr...@apache.org on 2017/02/24 21:42:02 UTC

[4/4] incubator-impala git commit: IMPALA-4902: Copy parameters map in HdfsPartition.toThrift().

IMPALA-4902: Copy parameters map in HdfsPartition.toThrift().

The bug: When generating the thrift representation of an HdfsTable
via toThrift(), each THdfsPartition used to contain a reference to
its partition's parameters map. As a result, one thread serializing
a thrift table returned by toThrift() could conflict with another
thread updating the parameters maps of the table's partitions.
Examples of operations that may modify the parameters map include:
COMPUTE [INCREMENTAL] STATS, DROP STATS,
ALTER TABLE SET TBLPROPERTIES, ALTER TABLE SET CACHED, etc.
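
For illustration only, here is a minimal, self-contained Java sketch
of this kind of race (hypothetical classes, not Impala code): one
thread iterates over a shared parameters map while "serializing" it,
while another thread mutates the same map, which can throw
ConcurrentModificationException or yield an inconsistent view.

  import java.util.HashMap;
  import java.util.Map;

  public class ParametersMapRaceSketch {
    public static void main(String[] args) throws InterruptedException {
      // Shared, unsynchronized map standing in for a partition's
      // hmsParameters.
      Map<String, String> parameters = new HashMap<>();
      parameters.put("numRows", "100");

      // "Serializer" thread: walks the map as thrift serialization would.
      Thread serializer = new Thread(() -> {
        for (int i = 0; i < 10000; i++) {
          StringBuilder sb = new StringBuilder();
          // May throw ConcurrentModificationException mid-iteration.
          for (Map.Entry<String, String> e : parameters.entrySet()) {
            sb.append(e.getKey()).append('=').append(e.getValue()).append(';');
          }
        }
      });

      // "DDL" thread: stands in for COMPUTE STATS / ALTER TABLE SET
      // TBLPROPERTIES updating the live map.
      Thread ddl = new Thread(() -> {
        for (int i = 0; i < 10000; i++) {
          parameters.put("some_property_" + i, "value");
        }
      });

      serializer.start();
      ddl.start();
      serializer.join();
      ddl.join();
    }
  }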

The fix: Create a shallow copy of the parameters map in
HdfsPartition.toThrift(). This means that toThrift() itself must be
protected against concurrent modifications to the parameters map
while it makes the copy, so callers of toThrift() are now required
to hold the table lock. One call site that did not already hold the
lock was adjusted.
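
As a sketch of the resulting pattern (hypothetical names, not the
actual Impala classes): the copy is taken while the table lock is
held, so later DDL that mutates the live map no longer affects the
copy handed to the serializer.

  import java.util.HashMap;
  import java.util.Map;
  import java.util.concurrent.locks.ReentrantLock;

  public class CopyUnderLockSketch {
    private final ReentrantLock tableLock = new ReentrantLock();
    private final Map<String, String> hmsParameters = new HashMap<>();

    // Analogous to a toThrift() caller: the lock must be held while the
    // shallow copy is made (the real code uses Maps.newHashMap(...)).
    public Map<String, String> snapshotParameters() {
      tableLock.lock();
      try {
        return new HashMap<>(hmsParameters);
      } finally {
        tableLock.unlock();
      }
    }

    // Analogous to a DDL operation updating table properties.
    public void setProperty(String key, String value) {
      tableLock.lock();
      try {
        hmsParameters.put(key, value);
      } finally {
        tableLock.unlock();
      }
    }
  }

The copy returned by snapshotParameters() can then be serialized
without holding the lock, which mirrors how the new getThriftTable()
helper in the test below acquires the table lock around toThrift().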

Testing:
- I was unable to reproduce the issue locally, but the stacks
  from the JIRAs point directly to the parameters map, and
  the races are pretty obvious from looking at the code.
- Passed a core/hdfs private run.

Change-Id: Ic11277ad5512d2431cd3cc791715917c95395ddf
Reviewed-on: http://gerrit.cloudera.org:8080/6127
Reviewed-by: Alex Behm <al...@cloudera.com>
Tested-by: Impala Public Jenkins


Project: http://git-wip-us.apache.org/repos/asf/incubator-impala/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-impala/commit/a7163684
Tree: http://git-wip-us.apache.org/repos/asf/incubator-impala/tree/a7163684
Diff: http://git-wip-us.apache.org/repos/asf/incubator-impala/diff/a7163684

Branch: refs/heads/master
Commit: a71636847fe742a9d0eb770516aff34ff16bbca1
Parents: 013456d
Author: Alex Behm <al...@cloudera.com>
Authored: Fri Feb 17 10:00:55 2017 -0800
Committer: Impala Public Jenkins <im...@gerrit.cloudera.org>
Committed: Fri Feb 24 10:18:38 2017 +0000

----------------------------------------------------------------------
 .../impala/catalog/CatalogServiceCatalog.java   | 90 +++++++++++---------
 .../apache/impala/catalog/HdfsPartition.java    |  7 +-
 .../java/org/apache/impala/catalog/Table.java   | 24 ++++--
 .../impala/service/CatalogOpExecutor.java       | 67 ++++++---------
 .../catalog/CatalogObjectToFromThriftTest.java  | 35 +++++---
 5 files changed, 122 insertions(+), 101 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a7163684/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
index 8be0aa3..00caf51 100644
--- a/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
+++ b/fe/src/main/java/org/apache/impala/catalog/CatalogServiceCatalog.java
@@ -52,6 +52,7 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.common.JniUtil;
 import org.apache.impala.common.Pair;
+import org.apache.impala.common.Reference;
 import org.apache.impala.hive.executor.UdfExecutor;
 import org.apache.impala.thrift.TCatalog;
 import org.apache.impala.thrift.TCatalogObject;
@@ -925,13 +926,14 @@ public class CatalogServiceCatalog extends Catalog {
    * Reloads metadata for table 'tbl'. If 'tbl' is an IncompleteTable, it makes an
    * asynchronous request to the table loading manager to create a proper table instance
    * and load the metadata from Hive Metastore. Otherwise, it updates table metadata
-   * in-place by calling the load() function on the specified table. Returns 'tbl', if it
-   * is a fully loaded table (e.g. HdfsTable, HBaseTable, etc). Otherwise, returns a
-   * newly constructed fully loaded table. Applies proper synchronization to protect the
-   * metadata load from concurrent table modifications and assigns a new catalog version.
+   * in-place by calling the load() function on the specified table. Returns the
+   * TCatalogObject representing 'tbl', if it is a fully loaded table (e.g. HdfsTable,
+   * HBaseTable, etc). Otherwise, returns a newly constructed fully loaded TCatalogObject.
+   * Applies proper synchronization to protect the metadata load from concurrent table
+   * modifications and assigns a new catalog version.
    * Throws a CatalogException if there is an error loading table metadata.
    */
-  public Table reloadTable(Table tbl) throws CatalogException {
+  public TCatalogObject reloadTable(Table tbl) throws CatalogException {
     LOG.info(String.format("Refreshing table metadata: %s", tbl.getFullName()));
     TTableName tblName = new TTableName(tbl.getDb().getName().toLowerCase(),
         tbl.getName().toLowerCase());
@@ -951,7 +953,8 @@ public class CatalogServiceCatalog extends Catalog {
       try {
         // The table may have been dropped/modified while the load was in progress, so
         // only apply the update if the existing table hasn't changed.
-        return replaceTableIfUnchanged(loadReq.get(), previousCatalogVersion);
+        Table result = replaceTableIfUnchanged(loadReq.get(), previousCatalogVersion);
+        return result.toTCatalogObject();
       } finally {
         loadReq.close();
         LOG.info(String.format("Refreshed table metadata: %s", tbl.getFullName()));
@@ -978,7 +981,7 @@ public class CatalogServiceCatalog extends Catalog {
       }
       tbl.setCatalogVersion(newCatalogVersion);
       LOG.info(String.format("Refreshed table metadata: %s", tbl.getFullName()));
-      return tbl;
+      return tbl.toTCatalogObject();
     } finally {
       Preconditions.checkState(!catalogLock_.isWriteLockedByCurrentThread());
       tbl.getLock().unlock();
@@ -986,13 +989,12 @@ public class CatalogServiceCatalog extends Catalog {
   }
 
   /**
-   * Reloads the metadata of a table with name 'tableName'. Returns the table or null if
-   * the table does not exist.
+   * Reloads the metadata of a table with name 'tableName'.
    */
-  public Table reloadTable(TTableName tableName) throws CatalogException {
+  public void reloadTable(TTableName tableName) throws CatalogException {
     Table table = getTable(tableName.getDb_name(), tableName.getTable_name());
-    if (table == null) return null;
-    return reloadTable(table);
+    if (table == null) return;
+    reloadTable(table);
   }
 
   /**
@@ -1047,27 +1049,25 @@ public class CatalogServiceCatalog extends Catalog {
    * Invalidates the table in the catalog cache, potentially adding/removing the table
    * from the cache based on whether it exists in the Hive Metastore.
    * The invalidation logic is:
-   * - If the table exists in the metastore, add it to the catalog as an uninitialized
+   * - If the table exists in the Metastore, add it to the catalog as an uninitialized
    *   IncompleteTable (replacing any existing entry). The table metadata will be
    *   loaded lazily, on the next access. If the parent database for this table does not
    *   yet exist in Impala's cache it will also be added.
-   * - If the table does not exist in the metastore, remove it from the catalog cache.
-   * - If we are unable to determine whether the table exists in the metastore (there was
+   * - If the table does not exist in the Metastore, remove it from the catalog cache.
+   * - If we are unable to determine whether the table exists in the Metastore (there was
    *   an exception thrown making the RPC), invalidate any existing Table by replacing
    *   it with an uninitialized IncompleteTable.
-   *
-   * The parameter updatedObjects is a Pair that contains details on what catalog objects
-   * were modified as a result of the invalidateTable() call. The first item in the Pair
-   * is a Db which will only be set if a new database was added as a result of this call,
-   * otherwise it will be null. The second item in the Pair is the Table that was
-   * modified/added/removed.
-   * Returns a flag that indicates whether the items in updatedObjects were removed
-   * (returns true) or added/modified (return false). Only Tables should ever be removed.
+   * Returns the thrift representation of the added/updated/removed table, or null if
+   * the table was not present in the catalog cache or the Metastore.
+   * Sets tblWasRemoved to true if the table was absent from the Metastore and it was
+   * removed from the catalog cache.
+   * Sets dbWasAdded to true if both a new database and table were added to the catalog
+   * cache.
    */
-  public boolean invalidateTable(TTableName tableName, Pair<Db, Table> updatedObjects) {
-    Preconditions.checkNotNull(updatedObjects);
-    updatedObjects.first = null;
-    updatedObjects.second = null;
+  public TCatalogObject invalidateTable(TTableName tableName,
+      Reference<Boolean> tblWasRemoved, Reference<Boolean> dbWasAdded) {
+    tblWasRemoved.setRef(false);
+    dbWasAdded.setRef(false);
     String dbName = tableName.getDb_name();
     String tblName = tableName.getTable_name();
     LOG.info(String.format("Invalidating table metadata: %s.%s", dbName, tblName));
@@ -1092,17 +1092,19 @@ public class CatalogServiceCatalog extends Catalog {
       }
 
       if (tableExistsInMetaStore != null && !tableExistsInMetaStore) {
-        updatedObjects.second = removeTable(dbName, tblName);
-        return true;
+        Table result = removeTable(dbName, tblName);
+        if (result == null) return null;
+        tblWasRemoved.setRef(true);
+        return result.toTCatalogObject();
       }
 
       db = getDb(dbName);
       if ((db == null || !db.containsTable(tblName)) && tableExistsInMetaStore == null) {
         // The table does not exist in our cache AND it is unknown whether the
-        // table exists in the metastore. Do nothing.
-        return false;
+        // table exists in the Metastore. Do nothing.
+        return null;
       } else if (db == null && tableExistsInMetaStore) {
-        // The table exists in the metastore, but our cache does not contain the parent
+        // The table exists in the Metastore, but our cache does not contain the parent
         // database. A new db will be added to the cache along with the new table. msDb
         // must be valid since tableExistsInMetaStore is true.
         try {
@@ -1111,11 +1113,11 @@ public class CatalogServiceCatalog extends Catalog {
           db = new Db(dbName, this, msDb);
           db.setCatalogVersion(incrementAndGetCatalogVersion());
           addDb(db);
-          updatedObjects.first = db;
+          dbWasAdded.setRef(true);
         } catch (TException e) {
-          // The metastore database cannot be get. Log the error and return.
+          // The Metastore database cannot be get. Log the error and return.
           LOG.error("Error executing getDatabase() metastore call: " + dbName, e);
-          return false;
+          return null;
         }
       }
     }
@@ -1130,8 +1132,12 @@ public class CatalogServiceCatalog extends Catalog {
       tableLoadingMgr_.backgroundLoad(new TTableName(dbName.toLowerCase(),
           tblName.toLowerCase()));
     }
-    updatedObjects.second = newTable;
-    return false;
+    if (dbWasAdded.getRef()) {
+      // The database should always have a lower catalog version than the table because
+      // it needs to be created before the table can be added.
+      Preconditions.checkState(db.getCatalogVersion() < newTable.getCatalogVersion());
+    }
+    return newTable.toTCatalogObject();
   }
 
   /**
@@ -1290,10 +1296,10 @@ public class CatalogServiceCatalog extends Catalog {
 
   /**
    * Reloads metadata for the partition defined by the partition spec
-   * 'partitionSpec' in table 'tbl'. Returns the table object with partition
-   * metadata reloaded
+   * 'partitionSpec' in table 'tbl'. Returns the resulting table's TCatalogObject after
+   * the partition metadata was reloaded.
    */
-  public Table reloadPartition(Table tbl, List<TPartitionKeyValue> partitionSpec)
+  public TCatalogObject reloadPartition(Table tbl, List<TPartitionKeyValue> partitionSpec)
       throws CatalogException {
     if (!tryLockTable(tbl)) {
       throw new CatalogException(String.format("Error reloading partition of table %s " +
@@ -1324,7 +1330,7 @@ public class CatalogServiceCatalog extends Catalog {
             hdfsTable.dropPartition(partitionSpec);
             hdfsTable.setCatalogVersion(newCatalogVersion);
           }
-          return hdfsTable;
+          return hdfsTable.toTCatalogObject();
         } catch (Exception e) {
           throw new CatalogException("Error loading metadata for partition: "
               + hdfsTable.getFullName() + " " + partitionName, e);
@@ -1334,7 +1340,7 @@ public class CatalogServiceCatalog extends Catalog {
       hdfsTable.setCatalogVersion(newCatalogVersion);
       LOG.info(String.format("Refreshed partition metadata: %s %s",
           hdfsTable.getFullName(), partitionName));
-      return hdfsTable;
+      return hdfsTable.toTCatalogObject();
     } finally {
       Preconditions.checkState(!catalogLock_.isWriteLockedByCurrentThread());
       tbl.getLock().unlock();

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a7163684/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index c240613..5db2247 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -762,8 +762,11 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
     thriftHdfsPart.setAccess_level(accessLevel_);
     thriftHdfsPart.setIs_marked_cached(isMarkedCached_);
     thriftHdfsPart.setId(getId());
-    thriftHdfsPart.setHms_parameters(
-        includeIncrementalStats ? hmsParameters_ : getFilteredHmsParameters());
+    // IMPALA-4902: Shallow-clone the map to avoid concurrent modifications. One thread
+    // may try to serialize the returned THdfsPartition after releasing the table's lock,
+    // and another thread doing DDL may modify the map.
+    thriftHdfsPart.setHms_parameters(Maps.newHashMap(
+        includeIncrementalStats ? hmsParameters_ : getFilteredHmsParameters()));
     if (includeFileDesc) {
       // Add block location information
       for (FileDescriptor fd: fileDescriptors_) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a7163684/fe/src/main/java/org/apache/impala/catalog/Table.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/Table.java b/fe/src/main/java/org/apache/impala/catalog/Table.java
index 01a4e55..61289a5 100644
--- a/fe/src/main/java/org/apache/impala/catalog/Table.java
+++ b/fe/src/main/java/org/apache/impala/catalog/Table.java
@@ -17,22 +17,19 @@
 
 package org.apache.impala.catalog;
 
-import java.util.concurrent.locks.ReentrantLock;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.log4j.Logger;
-
 import org.apache.impala.analysis.TableName;
-import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.thrift.TAccessLevel;
@@ -44,6 +41,8 @@ import org.apache.impala.thrift.TTable;
 import org.apache.impala.thrift.TTableDescriptor;
 import org.apache.impala.thrift.TTableStats;
 import org.apache.impala.util.HdfsCachingUtil;
+import org.apache.log4j.Logger;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -69,7 +68,7 @@ public abstract class Table implements CatalogObject {
   protected TTableDescriptor tableDesc_;
   protected TAccessLevel accessLevel_ = TAccessLevel.READ_WRITE;
   // Lock protecting this table
-  private final ReentrantLock tableLock_ = new ReentrantLock(true);
+  private final ReentrantLock tableLock_ = new ReentrantLock();
 
   // Number of clustering columns.
   protected int numClusteringCols_;
@@ -297,7 +296,22 @@ public abstract class Table implements CatalogObject {
     }
   }
 
+  /**
+   * Must be called with 'tableLock_' held to protect against concurrent modifications
+   * while producing the TTable result.
+   */
   public TTable toThrift() {
+    // It would be simple to acquire and release the lock in this function.
+    // However, in most cases toThrift() is called after modifying a table for which
+    // the table lock should already be held, and we want the toThrift() to be consistent
+    // with the modification. So this check helps us identify places where the lock
+    // acquisition is probably missing entirely.
+    if (!tableLock_.isHeldByCurrentThread()) {
+      throw new IllegalStateException(
+          "Table.toThrift() called without holding the table lock: " +
+              getFullName() + " " + getClass().getName());
+    }
+
     TTable table = new TTable(db_.getName(), name_);
     table.setAccess_level(accessLevel_);
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a7163684/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index c11f990..9d82c07 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -3069,63 +3069,50 @@ public class CatalogOpExecutor {
     resp.getResult().setCatalog_service_id(JniCatalog.getServiceId());
 
     if (req.isSetTable_name()) {
-      // Tracks any CatalogObjects updated/added/removed as a result of
-      // the invalidate metadata or refresh call. For refresh() it is only expected
-      // that a table be modified, but for invalidateTable() the table's parent database
-      // may have also been added if it did not previously exist in the catalog.
-      Pair<Db, Table> modifiedObjects = new Pair<Db, Table>(null, null);
-
-      boolean wasRemoved = false;
+      // Results of an invalidate operation, indicating whether the table was removed
+      // from the Metastore, and whether a new database was added to Impala as a result
+      // of the invalidate operation. Always false for refresh.
+      Reference<Boolean> tblWasRemoved = new Reference<Boolean>(false);
+      Reference<Boolean> dbWasAdded = new Reference<Boolean>(false);
+      // Thrift representation of the result of the invalidate/refresh operation.
+      TCatalogObject updatedThriftTable = null;
       if (req.isIs_refresh()) {
         TableName tblName = TableName.fromThrift(req.getTable_name());
         Table tbl = getExistingTable(tblName.getDb(), tblName.getTbl());
-        if (tbl == null) {
-          modifiedObjects.second = null;
-        } else {
+        if (tbl != null) {
           if (req.isSetPartition_spec()) {
-            modifiedObjects.second = catalog_.reloadPartition(tbl,
-                req.getPartition_spec());
+            updatedThriftTable = catalog_.reloadPartition(tbl, req.getPartition_spec());
           } else {
-            modifiedObjects.second = catalog_.reloadTable(tbl);
+            updatedThriftTable = catalog_.reloadTable(tbl);
           }
         }
       } else {
-        wasRemoved = catalog_.invalidateTable(req.getTable_name(), modifiedObjects);
+        updatedThriftTable = catalog_.invalidateTable(
+            req.getTable_name(), tblWasRemoved, dbWasAdded);
       }
 
-      if (modifiedObjects.first == null) {
-        if (modifiedObjects.second == null) {
-          // Table does not exist in the meta store and Impala catalog, throw error.
-          throw new TableNotFoundException("Table not found: " +
-              req.getTable_name().getDb_name() + "." +
-              req.getTable_name().getTable_name());
-        }
-        TCatalogObject thriftTable = modifiedObjects.second.toTCatalogObject();
+      if (updatedThriftTable == null) {
+        // Table does not exist in the Metastore and Impala catalog, throw error.
+        throw new TableNotFoundException("Table not found: " +
+            req.getTable_name().getDb_name() + "." +
+            req.getTable_name().getTable_name());
+      }
+
+      if (!dbWasAdded.getRef()) {
         // Return the TCatalogObject in the result to indicate this request can be
         // processed as a direct DDL operation.
-        if (wasRemoved) {
-          resp.getResult().setRemoved_catalog_object_DEPRECATED(thriftTable);
+        if (tblWasRemoved.getRef()) {
+          resp.getResult().setRemoved_catalog_object_DEPRECATED(updatedThriftTable);
         } else {
-          resp.getResult().setUpdated_catalog_object_DEPRECATED(thriftTable);
+          resp.getResult().setUpdated_catalog_object_DEPRECATED(updatedThriftTable);
         }
-        resp.getResult().setVersion(thriftTable.getCatalog_version());
       } else {
-        // If there were two catalog objects modified it indicates there was an
-        // "invalidateTable()" call that added a new table AND database to the catalog.
+        // Since multiple catalog objects were modified (db and table), don't treat this
+        // as a direct DDL operation. Set the overall catalog version and the impalad
+        // will wait for a statestore heartbeat that contains the update.
         Preconditions.checkState(!req.isIs_refresh());
-        Preconditions.checkNotNull(modifiedObjects.first);
-        Preconditions.checkNotNull(modifiedObjects.second);
-
-        // The database should always have a lower catalog version than the table because
-        // it needs to be created before the table can be added.
-        Preconditions.checkState(modifiedObjects.first.getCatalogVersion() <
-            modifiedObjects.second.getCatalogVersion());
-
-        // Since multiple catalog objects were modified, don't treat this as a direct DDL
-        // operation. Just set the overall catalog version and the impalad will wait for
-        // a statestore heartbeat that contains the update.
-        resp.getResult().setVersion(modifiedObjects.second.getCatalogVersion());
       }
+      resp.getResult().setVersion(updatedThriftTable.getCatalog_version());
     } else {
       // Invalidate the entire catalog if no table name is provided.
       Preconditions.checkArgument(!req.isIs_refresh());

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/a7163684/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
index ebd99ba..7d80ce0 100644
--- a/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
+++ b/fe/src/test/java/org/apache/impala/catalog/CatalogObjectToFromThriftTest.java
@@ -21,15 +21,10 @@ import static org.junit.Assert.fail;
 
 import java.util.Map;
 
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import org.apache.impala.testutil.CatalogServiceTestCatalog;
 import org.apache.impala.analysis.LiteralExpr;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.ImpalaException;
+import org.apache.impala.testutil.CatalogServiceTestCatalog;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.THBaseTable;
@@ -37,6 +32,11 @@ import org.apache.impala.thrift.THdfsPartition;
 import org.apache.impala.thrift.THdfsTable;
 import org.apache.impala.thrift.TTable;
 import org.apache.impala.thrift.TTableType;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
 import com.google.common.collect.Lists;
 
 /**
@@ -59,7 +59,7 @@ public class CatalogObjectToFromThriftTest {
                         "functional_seq"};
     for (String dbName: dbNames) {
       Table table = catalog_.getOrLoadTable(dbName, "alltypes");
-      TTable thriftTable = table.toThrift();
+      TTable thriftTable = getThriftTable(table);
       Assert.assertEquals(thriftTable.tbl_name, "alltypes");
       Assert.assertEquals(thriftTable.db_name, dbName);
       Assert.assertTrue(thriftTable.isSetTable_type());
@@ -125,7 +125,7 @@ public class CatalogObjectToFromThriftTest {
   public void TestMismatchedAvroAndTableSchemas() throws CatalogException {
     Table table = catalog_.getOrLoadTable("functional_avro_snap",
         "schema_resolution_test");
-    TTable thriftTable = table.toThrift();
+    TTable thriftTable = getThriftTable(table);
     Assert.assertEquals(thriftTable.tbl_name, "schema_resolution_test");
     Assert.assertTrue(thriftTable.isSetTable_type());
     Assert.assertEquals(thriftTable.getColumns().size(), 8);
@@ -145,7 +145,7 @@ public class CatalogObjectToFromThriftTest {
   public void TestHBaseTables() throws CatalogException {
     String dbName = "functional_hbase";
     Table table = catalog_.getOrLoadTable(dbName, "alltypes");
-    TTable thriftTable = table.toThrift();
+    TTable thriftTable = getThriftTable(table);
     Assert.assertEquals(thriftTable.tbl_name, "alltypes");
     Assert.assertEquals(thriftTable.db_name, dbName);
     Assert.assertTrue(thriftTable.isSetTable_type());
@@ -174,7 +174,7 @@ public class CatalogObjectToFromThriftTest {
       throws CatalogException {
     String dbName = "functional_hbase";
     Table table = catalog_.getOrLoadTable(dbName, "alltypessmallbinary");
-    TTable thriftTable = table.toThrift();
+    TTable thriftTable = getThriftTable(table);
     Assert.assertEquals(thriftTable.tbl_name, "alltypessmallbinary");
     Assert.assertEquals(thriftTable.db_name, dbName);
     Assert.assertTrue(thriftTable.isSetTable_type());
@@ -206,7 +206,7 @@ public class CatalogObjectToFromThriftTest {
   @Test
   public void TestTableLoadingErrors() throws ImpalaException {
     Table table = catalog_.getOrLoadTable("functional", "hive_index_tbl");
-    TTable thriftTable = table.toThrift();
+    TTable thriftTable = getThriftTable(table);
     Assert.assertEquals(thriftTable.tbl_name, "hive_index_tbl");
     Assert.assertEquals(thriftTable.db_name, "functional");
 
@@ -237,11 +237,22 @@ public class CatalogObjectToFromThriftTest {
   @Test
   public void TestView() throws CatalogException {
     Table table = catalog_.getOrLoadTable("functional", "view_view");
-    TTable thriftTable = table.toThrift();
+    TTable thriftTable = getThriftTable(table);
     Assert.assertEquals(thriftTable.tbl_name, "view_view");
     Assert.assertEquals(thriftTable.db_name, "functional");
     Assert.assertFalse(thriftTable.isSetHdfs_table());
     Assert.assertFalse(thriftTable.isSetHbase_table());
     Assert.assertTrue(thriftTable.isSetMetastore_table());
   }
+
+  private TTable getThriftTable(Table table) {
+    TTable thriftTable = null;
+    table.getLock().lock();
+    try {
+      thriftTable = table.toThrift();
+    } finally {
+      table.getLock().unlock();
+    }
+    return thriftTable;
+  }
 }