Posted to commits@hive.apache.org by se...@apache.org on 2018/07/25 18:27:10 UTC

[01/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Repository: hive
Updated Branches:
  refs/heads/master 71c49878c -> 758ff4490


http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
index 0000000,816a735..bf302ed
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java
@@@ -1,0 -1,1385 +1,1400 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.client;
+ 
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.metastore.ColumnType;
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.transport.TTransportException;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import java.io.File;
+ import java.net.URI;
+ import java.net.URISyntaxException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ 
+ /**
+  * Test class for the IMetaStoreClient API. Tests the Table-related functions for metadata
+  * manipulation, like creating, dropping, altering and truncating tables.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestTablesCreateDropAlterTruncate extends MetaStoreClientTest {
+   private static final String DEFAULT_DATABASE = "default";
+   private static final String OTHER_DATABASE = "dummy";
+   private final AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+   private Table[] testTables = new Table[6];
+   private Table partitionedTable = null;
+   private Table externalTable = null;
+ 
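+   /**
+    * Each parameterized run receives its own AbstractMetaStoreService; the suite is typically
+    * run against both an embedded and a remote MetaStore, which is why several tests below
+    * accept either a MetaException or a TProtocolException for the same invalid call.
+    */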
+   public TestTablesCreateDropAlterTruncate(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @BeforeClass
+   public static void startMetaStores() {
+     Map<MetastoreConf.ConfVars, String> msConf = new HashMap<MetastoreConf.ConfVars, String>();
+     // Enable trash, so it can be tested
+     Map<String, String> extraConf = new HashMap<>();
+     extraConf.put("fs.trash.checkpoint.interval", "30");  // FS_TRASH_CHECKPOINT_INTERVAL_KEY
+     extraConf.put("fs.trash.interval", "30");             // FS_TRASH_INTERVAL_KEY (hadoop-2)
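+     // Trash needs to be enabled so the drop table tests can verify both behaviours: with purge
+     // the data should be deleted outright, without purge it should end up in the trash
+     // (see testDropTableWithPurge / testDropTableWithoutPurge)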
+     startMetaStores(msConf, extraConf);
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(OTHER_DATABASE, true, true, true);
+     // Drop every table in the default database
+     for(String tableName : client.getAllTables(DEFAULT_DATABASE)) {
+       client.dropTable(DEFAULT_DATABASE, tableName, true, true, true);
+     }
+ 
+     // Clean up trash
+     metaStore.cleanWarehouseDirs();
+ 
+     testTables[0] =
+         new TableBuilder()
+             .setTableName("test_table")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[1] =
+         new TableBuilder()
+             .setTableName("test_view")
+             .addCol("test_col", "int")
+             .setType("VIRTUAL_VIEW")
+             .create(client, metaStore.getConf());
+ 
+     testTables[2] =
+         new TableBuilder()
+             .setTableName("test_table_to_find_1")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[3] =
+         new TableBuilder()
+             .setTableName("test_partitioned_table")
+             .addCol("test_col1", "int")
+             .addCol("test_col2", "int")
+             .addPartCol("test_part_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     testTables[4] =
+         new TableBuilder()
+             .setTableName("external_table_for_test")
+             .addCol("test_col", "int")
+             .setLocation(metaStore.getWarehouseRoot() + "/external/table_dir")
+             .addTableParam("EXTERNAL", "TRUE")
+             .setType("EXTERNAL_TABLE")
+             .create(client, metaStore.getConf());
+ 
+ 
+     new DatabaseBuilder().setName(OTHER_DATABASE).create(client, metaStore.getConf());
+ 
+     testTables[5] =
+         new TableBuilder()
+             .setDbName(OTHER_DATABASE)
+             .setTableName("test_table")
+             .addCol("test_col", "int")
+             .create(client, metaStore.getConf());
+ 
+     // Create partitions for the partitioned table
+     for(int i=0; i < 2; i++) {
+       new PartitionBuilder()
+               .inTable(testTables[3])
+               .addValue("a" + i)
+               .addToTable(client, metaStore.getConf());
+     }
+     // Add an external partition too
+     new PartitionBuilder()
+         .inTable(testTables[3])
+         .addValue("a2")
+         .setLocation(metaStore.getWarehouseRoot() + "/external/a2")
+         .addToTable(client, metaStore.getConf());
+ 
+     // Add data files to the partitioned table
+     List<Partition> partitions =
+         client.listPartitions(testTables[3].getDbName(), testTables[3].getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       metaStore.createFile(dataFile, "100");
+     }
+ 
+     // Reload tables from the MetaStore, and create data files
+     for(int i=0; i < testTables.length; i++) {
+       testTables[i] = client.getTable(testTables[i].getDbName(), testTables[i].getTableName());
+       if (testTables[i].getPartitionKeys().isEmpty()) {
+         if (testTables[i].getSd().getLocation() != null) {
+           Path dataFile = new Path(testTables[i].getSd().getLocation() + "/dataFile");
+           metaStore.createFile(dataFile, "100");
+         }
+       }
+     }
+     partitionedTable = testTables[3];
+     externalTable = testTables[4];
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   /**
+    * This test creates and queries a table and then drops it. Good for testing the happy path
+    */
+   @Test
+   public void testCreateGetDeleteTable() throws Exception {
+     // Try to create a table with all of the parameters set
+     Table table = getTableWithAllParametersSet();
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+     // The createTime will be set on the server side, so the comparison should skip it
+     table.setCreateTime(createdTable.getCreateTime());
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: table.getParameters().keySet()) {
+       Assert.assertEquals("parameters are the same",
+           table.getParameters().get(key), createdTable.getParameters().get(key));
+     }
+     // Reset the parameters, so we can compare
+     table.setParameters(createdTable.getParameters());
+     table.setCreationMetadata(createdTable.getCreationMetadata());
++    table.setWriteId(createdTable.getWriteId());
+     Assert.assertEquals("create/get table data", table, createdTable);
+ 
+     // Check that the directory is created
+     Assert.assertTrue("The directory should not be created",
+         metaStore.isPathExists(new Path(createdTable.getSd().getLocation())));
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, false);
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void testCreateTableDefaultValues() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(DEFAULT_DATABASE);
+     table.setTableName("test_table_2");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+ 
+     Assert.assertEquals("Comparing OwnerType", PrincipalType.USER, createdTable.getOwnerType());
+     Assert.assertNull("Comparing OwnerName", createdTable.getOwner());
+     Assert.assertNotEquals("Comparing CreateTime", 0, createdTable.getCreateTime());
+     Assert.assertEquals("Comparing LastAccessTime", 0, createdTable.getLastAccessTime());
+     Assert.assertEquals("Comparing Retention", 0, createdTable.getRetention());
+     Assert.assertEquals("Comparing PartitionKeys", 0, createdTable.getPartitionKeys().size());
+     // TODO: If this test method is the first to run, then the parameters do not contain totalSize
+     // and numFiles; if it runs after other tests (setUp/dropDatabase is successful), then
+     // totalSize and numFiles are set.
+     Assert.assertEquals("Comparing Parameters length", 1, createdTable.getParameters().size());
+     Assert.assertNotEquals("Comparing Parameters(transient_lastDdlTime)", "0",
+         createdTable.getParameters().get("transient_lastDdlTime"));
+ //    Assert.assertEquals("Comparing Parameters(totalSize)", "0",
+ //        createdTable.getParameters().get("totalSize"));
+ //    Assert.assertEquals("Comparing Parameters(numFiles)", "0",
+ //        createdTable.getParameters().get("numFiles"));
+     Assert.assertNull("Comparing ViewOriginalText", createdTable.getViewOriginalText());
+     Assert.assertNull("Comparing ViewExpandedText", createdTable.getViewExpandedText());
+     Assert.assertEquals("Comparing TableType", "MANAGED_TABLE", createdTable.getTableType());
+     Assert.assertTrue("Creation metadata should be empty", createdTable.getCreationMetadata() == null);
+ 
+     // Storage Descriptor data
+     StorageDescriptor createdSd = createdTable.getSd();
+     Assert.assertEquals("Storage descriptor cols", 1, createdSd.getCols().size());
+     Assert.assertNull("Storage descriptor cols[0].comment",
+         createdSd.getCols().get(0).getComment());
+     Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
+         + "/" + table.getTableName(), createdSd.getLocation());
+     Assert.assertTrue("Table path should be created",
+         metaStore.isPathExists(new Path(createdSd.getLocation())));
+     // TODO: Embedded MetaStore changes the table object when client.createTable is called
+     //Assert.assertNull("Original table storage descriptor location should be null",
+     //    table.getSd().getLocation());
+ 
+     Assert.assertNull("Storage descriptor input format", createdSd.getInputFormat());
+     Assert.assertNull("Storage descriptor output format", createdSd.getOutputFormat());
+     Assert.assertFalse("Storage descriptor compressed", createdSd.isCompressed());
+     Assert.assertEquals("Storage descriptor num buckets", 0, createdSd.getNumBuckets());
+     Assert.assertEquals("Storage descriptor bucket cols", 0, createdSd.getBucketCols().size());
+     Assert.assertEquals("Storage descriptor sort cols", 0, createdSd.getSortCols().size());
+     Assert.assertEquals("Storage descriptor parameters", 0, createdSd.getParameters().size());
+     Assert.assertFalse("Storage descriptor stored as subdir", createdSd.isStoredAsSubDirectories());
+ 
+     // Serde info
+     SerDeInfo serDeInfo = createdSd.getSerdeInfo();
+     Assert.assertNull("SerDeInfo name", serDeInfo.getName());
+     Assert.assertNull("SerDeInfo serialization lib", serDeInfo.getSerializationLib());
+     Assert.assertEquals("SerDeInfo parameters", 0, serDeInfo.getParameters().size());
+ 
+     // Skewed info
+     SkewedInfo skewedInfo = createdSd.getSkewedInfo();
+     Assert.assertEquals("Skewed info col names", 0, skewedInfo.getSkewedColNames().size());
+     Assert.assertEquals("Skewed info col values", 0, skewedInfo.getSkewedColValues().size());
+     Assert.assertEquals("Skewed info col value maps", 0,
+         skewedInfo.getSkewedColValueLocationMaps().size());
+   }
+ 
+   @Test
+   public void testCreateTableDefaultLocationInSpecificDatabase() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(OTHER_DATABASE);
+     table.setTableName("test_table_2");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+     Assert.assertEquals("Storage descriptor location", metaStore.getWarehouseRoot()
+         + "/" + table.getDbName() + ".db/" + table.getTableName(),
+         createdTable.getSd().getLocation());
+   }
+ 
+   @Test
+   public void testCreateTableDefaultValuesView() throws Exception {
+     Table table = new Table();
+     StorageDescriptor sd = new StorageDescriptor();
+     List<FieldSchema> cols = new ArrayList<>();
+ 
+     table.setDbName(DEFAULT_DATABASE);
+     table.setTableName("test_table_2");
+     table.setTableType("VIRTUAL_VIEW");
+     cols.add(new FieldSchema("column_name", "int", null));
+     sd.setCols(cols);
+     sd.setSerdeInfo(new SerDeInfo());
+     table.setSd(sd);
+ 
+     client.createTable(table);
+     Table createdTable = client.getTable(table.getDbName(), table.getTableName());
+ 
+     // No location should be created for views
+     Assert.assertNull("Storage descriptor location should be null",
+         createdTable.getSd().getLocation());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullDatabase() throws Exception {
+     Table table = testTables[0];
+     table.setDbName(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullTableName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableInvalidTableName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName("test_table;");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableEmptyName() throws Exception {
+     Table table = testTables[0];
+     table.setTableName("");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableNullStorageDescriptor() throws Exception {
+     Table table = testTables[0];
+     table.setSd(null);
+ 
+     client.createTable(table);
+   }
+ 
+   private Table getNewTable() throws MetaException {
+     return new TableBuilder()
+                .setTableName("test_table_with_invalid_sd")
+                .addCol("test_col", "int")
+                .build(metaStore.getConf());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullColumns() throws Exception {
+     Table table = getNewTable();
+     table.getSd().setCols(null);
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullSerdeInfo() throws Exception {
+     Table table = getNewTable();
+     table.getSd().setSerdeInfo(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testCreateTableInvalidStorageDescriptorNullColumnType() throws Exception {
+     Table table = getNewTable();
+     table.getSd().getCols().get(0).setType(null);
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
+     Table table = getNewTable();
+     table.getSd().getCols().get(0).setType("xyz");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testCreateTableNoSuchDatabase() throws Exception {
+     Table table = testTables[0];
+     table.setDbName("no_such_database");
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testCreateTableAlreadyExists() throws Exception {
+     Table table = testTables[0];
+ 
+     client.createTable(table);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchDatabase() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable("no_such_database", table.getTableName(), true, false);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchTable() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable(table.getDbName(), "no_such_table", true, false);
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void testDropTableNoSuchTableInTheDatabase() throws Exception {
+     Table table = testTables[2];
+ 
+     client.dropTable(OTHER_DATABASE, table.getTableName(), true, false);
+   }
+ 
+   @Test
+   public void testDropTableNullDatabase() throws Exception {
+     // Missing database in the query
+     try {
+       client.dropTable(null, OTHER_DATABASE, true, false);
+       // TODO: Should be checked on server side. On Embedded metastore it throws MetaException,
+       // on Remote metastore it throws TProtocolException
+       Assert.fail("Expected an MetaException or TProtocolException to be thrown");
+     } catch (MetaException exception) {
+       // Expected exception - Embedded MetaStore
+     } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test
+   public void testDropTableNullTableName() throws Exception {
+     try {
+       client.dropTable(DEFAULT_DATABASE, null, true, false);
+       // TODO: Should be checked on server side. On Embedded metastore it throws MetaException,
+       // on Remote metastore it throws TProtocolException
+       Assert.fail("Expected an MetaException or TProtocolException to be thrown");
+     } catch (MetaException exception) {
+       // Expected exception - Embedded MetaStore
+     } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test
+   public void testDropTableCaseInsensitive() throws Exception {
+     Table table = testTables[0];
+ 
+     // Test in upper case
+     client.dropTable(table.getDbName().toUpperCase(), table.getTableName().toUpperCase());
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+ 
+     // Test in mixed case
+     client.createTable(table);
+     client.dropTable("DeFaUlt", "TeST_tAbLE");
+     try {
+       client.getTable(table.getDbName(), table.getTableName());
+       Assert.fail("Expected a NoSuchObjectException to be thrown");
+     } catch (NoSuchObjectException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void testDropTableDeleteDir() throws Exception {
+     Table table = testTables[0];
+     Partition externalPartition = client.getPartition(partitionedTable.getDbName(),
+         partitionedTable.getTableName(), "test_part_col=a2");
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+ 
+     client.createTable(table);
+     client.dropTable(table.getDbName(), table.getTableName(), false, false);
+ 
+     Assert.assertTrue("Table path should be kept",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+ 
+     // Drop table with partitions
+     client.dropTable(partitionedTable.getDbName(), partitionedTable.getTableName(), true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(partitionedTable.getSd().getLocation())));
+ 
+     Assert.assertFalse("Extra partition path should be removed",
+         metaStore.isPathExists(new Path(externalPartition.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableIgnoreUnknown() throws Exception {
+     Table table = testTables[0];
+ 
+     // Check what happens, when we ignore these errors
+     client.dropTable("no_such_database", table.getTableName(), true, true);
+     client.dropTable(table.getDbName(), "no_such_table", false, true);
+     client.dropTable(OTHER_DATABASE, table.getTableName(), true, true);
+ 
+     // TODO: Strangely, the default behaviour of this overload is to ignore missing tables
+     client.dropTable("no_such_database", table.getTableName());
+     client.dropTable(table.getDbName(), "no_such_table");
+     client.dropTable(OTHER_DATABASE, table.getTableName());
+   }
+ 
+   @Test
+   public void testDropTableWithPurge() throws Exception {
+     Table table = testTables[0];
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, true);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should not be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableWithoutPurge() throws Exception {
+     Table table = testTables[0];
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, false);
+ 
+     Assert.assertFalse("Table path should be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertTrue("Table path should be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableExternalWithPurge() throws Exception {
+     Table table = externalTable;
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, true);
+ 
+     Assert.assertTrue("Table path should not be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should not be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testDropTableExternalWithoutPurge() throws Exception {
+     Table table = externalTable;
+ 
+     client.dropTable(table.getDbName(), table.getTableName(), true, true, false);
+ 
+     Assert.assertTrue("Table path should not be removed",
+         metaStore.isPathExists(new Path(table.getSd().getLocation())));
+     Assert.assertFalse("Table path should be in trash",
+         metaStore.isPathExistsInTrash(new Path(table.getSd().getLocation())));
+   }
+ 
+   @Test
+   public void testTruncateTableUnpartitioned() throws Exception {
+     // Unpartitioned table
+     Path dataFile = new Path(testTables[0].getSd().getLocation() + "/dataFile");
+     client.truncateTable(testTables[0].getDbName(), testTables[0].getTableName(), null);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(testTables[0].getSd().getLocation())));
+     Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile));
+ 
+   }
+ 
+   @Test
+   public void testTruncateTablePartitioned() throws Exception {
+     // Partitioned table - delete specific partitions a0, a2
+     List<String> partitionsToDelete = new ArrayList<>();
+     partitionsToDelete.add("test_part_col=a0");
+     partitionsToDelete.add("test_part_col=a2");
+     client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(),
+         partitionsToDelete);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(testTables[0].getSd().getLocation())));
+     List<Partition> partitions =
+         client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(),
+             (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       if (partition.getValues().contains("a0") || partition.getValues().contains("a2")) {
+         // a0, a2 should be empty
+         Assert.assertFalse("DataFile should be removed", metaStore.isPathExists(dataFile));
+       } else {
+         // Others (a1) should be kept
+         Assert.assertTrue("DataFile should not be removed", metaStore.isPathExists(dataFile));
+       }
+     }
+ 
+   }
+ 
+   @Test
+   public void testTruncateTablePartitionedDeleteAll() throws Exception {
+     // Partitioned table - delete all
+     client.truncateTable(partitionedTable.getDbName(), partitionedTable.getTableName(), null);
+     Assert.assertTrue("Location should exist",
+         metaStore.isPathExists(new Path(testTables[0].getSd().getLocation())));
+     List<Partition> partitions =
+         client.listPartitions(partitionedTable.getDbName(), partitionedTable.getTableName(),
+             (short)-1);
+     for(Partition partition : partitions) {
+       Path dataFile = new Path(partition.getSd().getLocation() + "/dataFile");
+       Assert.assertFalse("Every dataFile should be removed", metaStore.isPathExists(dataFile));
+     }
+   }
+ 
+   @Test
+   public void testAlterTable() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = getTableWithAllParametersSet();
+     newTable.setTableName(originalTableName);
+     newTable.setDbName(originalDatabase);
+     // Partition keys can not be changed through alter_table, but getTableWithAllParametersSet
+     // added one, so remove it for this test
+     newTable.setPartitionKeys(originalTable.getPartitionKeys());
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     Table alteredTable = client.getTable(originalDatabase, originalTableName);
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters are checked manually, so do not check them
+     newTable.setParameters(alteredTable.getParameters());
+ 
+     // Some of the data is set on the server side, so reset those
+     newTable.setCreateTime(alteredTable.getCreateTime());
+     newTable.setCreationMetadata(alteredTable.getCreationMetadata());
++    newTable.setWriteId(alteredTable.getWriteId());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableRename() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     // Keep the original location, to test that it is still updated on rename even when it is
+     // not set to null but left as the old value
+     newTable.setTableName("new_table");
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertFalse("Original table directory should be removed",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("New table directory should exist",
+         metaStore.isPathExists(new Path(alteredTable.getSd().getLocation())));
+     Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot()
+         + "/" + alteredTable.getTableName()), new Path(alteredTable.getSd().getLocation()));
+ 
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The following data should be changed
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableChangingDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     newTable.setDbName(OTHER_DATABASE);
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertFalse("Original table directory should be removed",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("New table directory should exist",
+         metaStore.isPathExists(new Path(alteredTable.getSd().getLocation())));
+     Assert.assertEquals("New directory should be set", new Path(metaStore.getWarehouseRoot()
+         + "/" + alteredTable.getDbName() + ".db/" + alteredTable.getTableName()),
+         new Path(alteredTable.getSd().getLocation()));
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("New directory should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The following data should be changed, other data should be the same
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableExternalTable() throws Exception {
+     Table originalTable = externalTable;
+     String originalTableName = originalTable.getTableName();
+     String originalDatabase = originalTable.getDbName();
+ 
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("new_external_table_for_test");
+     client.alter_table(originalDatabase, originalTableName, newTable);
+     List<String> tableNames = client.getTables(originalDatabase, originalTableName);
+     Assert.assertEquals("Original table should be removed", 0, tableNames.size());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("New location should be the same", originalTable.getSd().getLocation(),
+         alteredTable.getSd().getLocation());
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertTrue("The location should contain data", metaStore.isPathExists(dataFile));
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters are checked manually, so do not check them
+     newTable.setParameters(alteredTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableExternalTableChangeLocation() throws Exception {
+     Table originalTable = externalTable;
+ 
+     // Change the location, and see the results
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setLocation(newTable.getSd().getLocation() + "_modified");
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+     Assert.assertEquals("New location should be the new one", newTable.getSd().getLocation(),
+         alteredTable.getSd().getLocation());
+     Path dataFile = new Path(alteredTable.getSd().getLocation() + "/dataFile");
+     Assert.assertFalse("The location should not contain data", metaStore.isPathExists(dataFile));
+ 
+     // The extra parameters will be added on server side, so check that the required ones are
+     // present
+     for(String key: newTable.getParameters().keySet()) {
+       Assert.assertEquals("parameters are present", newTable.getParameters().get(key),
+           alteredTable.getParameters().get(key));
+     }
+     // The parameters are checked manually, so do not check them
+     newTable.setParameters(alteredTable.getParameters());
+ 
+     // The following data should be changed, other data should be the same
+     newTable.getSd().setLocation(alteredTable.getSd().getLocation());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @Test
+   public void testAlterTableChangeCols() throws Exception {
+     Table originalTable = partitionedTable;
+ 
+     Table newTable = originalTable.deepCopy();
+ 
+     List<FieldSchema> cols = newTable.getSd().getCols();
+     // Change a column
+     cols.get(0).setName("modified_col");
+     // Remove a column
+     cols.remove(1);
+     // Add a new column
+     cols.add(new FieldSchema("new_col", "int", null));
+     // Store the changes
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertTrue("Original table directory should be kept",
+         metaStore.isPathExists(new Path(originalTable.getSd().getLocation())));
+ 
+     // The following data might be changed
+     alteredTable.setParameters(newTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+ 
+     // Modify partition column type, and comment
+     newTable.getPartitionKeys().get(0).setType("string");
+     newTable.getPartitionKeys().get(0).setComment("changed comment");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     // The following data might be changed
+     alteredTable.setParameters(newTable.getParameters());
+     Assert.assertEquals("The table data should be the same", newTable, alteredTable);
+   }
+ 
+   @SuppressWarnings("deprecation")
+   @Test
+   public void testAlterTableCascade() throws Exception {
+     Table originalTable = partitionedTable;
+ 
+     Table newTable = originalTable.deepCopy();
+     List<FieldSchema> cols = newTable.getSd().getCols();
+     cols.add(new FieldSchema("new_col_1", "int", null));
+ 
+     // Run without cascade
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, false);
+     Table alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     List<Partition> partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should not be changed", 2,
+           partition.getSd().getCols().size());
+     }
+ 
+     // Run with cascade
+     cols.add(new FieldSchema("new_col_2", "int", null));
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable, true);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should be changed", 4,
+           partition.getSd().getCols().size());
+     }
+ 
+     // Run using environment context with cascade
+     cols.add(new FieldSchema("new_col_3", "int", null));
+     EnvironmentContext context = new EnvironmentContext();
+     context.putToProperties(StatsSetupConst.CASCADE, "true");
+     client.alter_table_with_environmentContext(originalTable.getDbName(),
+         originalTable.getTableName(), newTable, context);
+     alteredTable = client.getTable(newTable.getDbName(), newTable.getTableName());
+     Assert.assertEquals("The table data should be changed", newTable, alteredTable);
+ 
+     partitions =
+         client.listPartitions(originalTable.getDbName(), originalTable.getTableName(), (short)-1);
+     for(Partition partition : partitions) {
+       Assert.assertEquals("Partition columns should be changed", 5,
+           partition.getSd().getCols().size());
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableNullDatabaseInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setDbName(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName(null);
+ 
 -    client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
++    try {
++      client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("test_table;");
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableEmptyTableNameInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setTableName("");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableNullStorageDescriptorInNew() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.setSd(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullDatabase() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
 -
 -    client.alter_table(null, originalTable.getTableName(), newTable);
++    try {
++      client.alter_table(null, originalTable.getTableName(), newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
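++      // Expected.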
++    }
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterTableNullTableName() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+ 
 -    client.alter_table(originalTable.getDbName(), null, newTable);
++    try {
++      client.alter_table(originalTable.getDbName(), null, newTable);
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected.
++    }
+   }
+ 
+   @Test
+   public void testAlterTableNullNewTable() throws Exception {
+     Table originalTable = testTables[0];
+     try {
+       client.alter_table(originalTable.getDbName(), originalTable.getTableName(), null);
+       // TODO: Should be checked on server side. On Embedded metastore it throws
+       // NullPointerException, on Remote metastore it throws TProtocolException
+       Assert.fail("Expected a NullPointerException or TProtocolException to be thrown");
+     } catch (NullPointerException exception) {
+       // Expected exception - Embedded MetaStore
 -    } catch (TTransportException exception) {
++    } catch (TProtocolException exception) {
+       // Expected exception - Remote MetaStore
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullCols() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setCols(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullSerdeInfo() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setSerdeInfo(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullColumnType() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().getCols().get(0).setType(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterTableInvalidStorageDescriptorNullLocation() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().setLocation(null);
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorInvalidColumnType() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.getSd().getCols().get(0).setType("xyz");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorAddPartitionColumns() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+     newTable.addToPartitionKeys(new FieldSchema("new_part", "int", "comment"));
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorAlterPartitionColumnName() throws Exception {
+     Table originalTable = partitionedTable;
+     Table newTable = originalTable.deepCopy();
+     newTable.getPartitionKeys().get(0).setName("altered_name");
+ 
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableInvalidStorageDescriptorRemovePartitionColumn() throws Exception {
+     Table originalTable = partitionedTable;
+     Table newTable = originalTable.deepCopy();
+     newTable.getPartitionKeys().remove(0);
+     client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table("no_such_database", originalTable.getTableName(), newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchTable() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table(originalTable.getDbName(), "no_such_table_name", newTable);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterTableNoSuchTableInThisDatabase() throws Exception {
+     Table originalTable = testTables[2];
+     Table newTable = originalTable.deepCopy();
+ 
+     client.alter_table(OTHER_DATABASE, originalTable.getTableName(), newTable);
+   }
+ 
+   @Test
+   public void testAlterTableAlreadyExists() throws Exception {
+     Table originalTable = testTables[0];
+     Table newTable = originalTable.deepCopy();
+ 
+     newTable.setTableName(testTables[2].getTableName());
+     try {
+       // Already existing table
+       client.alter_table(originalTable.getDbName(), originalTable.getTableName(), newTable);
+       // TODO: Maybe throw AlreadyExistsException.
+       Assert.fail("Expected an InvalidOperationException to be thrown");
+     } catch (InvalidOperationException exception) {
+       // Expected exception
+     }
+   }
+ 
+   @Test
+   public void tablesInOtherCatalogs() throws TException, URISyntaxException {
+     String catName = "create_etc_tables_in_other_catalogs";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "db_in_other_catalog";
+     // For this one don't specify a location to make sure it gets put in the catalog directory
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String[] tableNames = new String[4];
+     for (int i = 0; i < tableNames.length; i++) {
+       tableNames[i] = "table_in_other_catalog_" + i;
+       TableBuilder builder = new TableBuilder()
+           .inDb(db)
+           .setTableName(tableNames[i])
+           .addCol("col1_" + i, ColumnType.STRING_TYPE_NAME)
+           .addCol("col2_" + i, ColumnType.INT_TYPE_NAME);
+       // Make one have a non-standard location
+       if (i == 0) builder.setLocation(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i]));
+       // Make one partitioned
+       if (i == 2) builder.addPartCol("pcol1", ColumnType.STRING_TYPE_NAME);
+       // Make one a materialized view
+       if (i == 3) {
+         builder.setType(TableType.MATERIALIZED_VIEW.name())
+             .setRewriteEnabled(true)
+             .addMaterializedViewReferencedTable(dbName + "." + tableNames[0]);
+       }
+       client.createTable(builder.build(metaStore.getConf()));
+     }
+ 
+     // Add partitions for the partitioned table
+     String[] partVals = new String[3];
+     Table partitionedTable = client.getTable(catName, dbName, tableNames[2]);
+     for (int i = 0; i < partVals.length; i++) {
+       partVals[i] = "part" + i;
+       new PartitionBuilder()
+           .inTable(partitionedTable)
+           .addValue(partVals[i])
+           .addToTable(client, metaStore.getConf());
+     }
+ 
+     // Get tables, make sure the locations are correct
+     for (int i = 0; i < tableNames.length; i++) {
+       Table t = client.getTable(catName, dbName, tableNames[i]);
+       Assert.assertEquals(catName, t.getCatName());
+       String expectedLocation = (i < 1) ?
+         new File(MetaStoreTestUtils.getTestWarehouseDir(tableNames[i])).toURI().toString()
+         :
+         new File(cat.getLocationUri() + File.separatorChar + dbName + ".db",
+             tableNames[i]).toURI().toString();
+ 
+       Assert.assertEquals(expectedLocation, t.getSd().getLocation() + "/");
+       File dir = new File(new URI(t.getSd().getLocation()).getPath());
+       Assert.assertTrue(dir.exists() && dir.isDirectory());
+ 
+     }
+ 
+     // Make sure getting table in the wrong catalog does not work
+     try {
+       Table t = client.getTable(DEFAULT_DATABASE_NAME, tableNames[0]);
+       Assert.fail();
+     } catch (NoSuchObjectException e) {
+       // NOP
+     }
+ 
+     // test getAllTables
+     Set<String> fetchedNames = new HashSet<>(client.getAllTables(catName, dbName));
+     Assert.assertEquals(tableNames.length, fetchedNames.size());
+     for (String tableName : tableNames) Assert.assertTrue(fetchedNames.contains(tableName));
+ 
+     fetchedNames = new HashSet<>(client.getAllTables(DEFAULT_DATABASE_NAME));
+     for (String tableName : tableNames) Assert.assertFalse(fetchedNames.contains(tableName));
+ 
+     // test getMaterializedViewsForRewriting
+     List<String> materializedViews = client.getMaterializedViewsForRewriting(catName, dbName);
+     Assert.assertEquals(1, materializedViews.size());
+     Assert.assertEquals(tableNames[3], materializedViews.get(0));
+ 
+     fetchedNames = new HashSet<>(client.getMaterializedViewsForRewriting(DEFAULT_DATABASE_NAME));
+     Assert.assertFalse(fetchedNames.contains(tableNames[3]));
+ 
+     // test getTableObjectsByName
+     List<Table> fetchedTables = client.getTableObjectsByName(catName, dbName,
+         Arrays.asList(tableNames[0], tableNames[1]));
+     Assert.assertEquals(2, fetchedTables.size());
+     Collections.sort(fetchedTables);
+     Assert.assertEquals(tableNames[0], fetchedTables.get(0).getTableName());
+     Assert.assertEquals(tableNames[1], fetchedTables.get(1).getTableName());
+ 
+     fetchedTables = client.getTableObjectsByName(DEFAULT_DATABASE_NAME,
+         Arrays.asList(tableNames[0], tableNames[1]));
+     Assert.assertEquals(0, fetchedTables.size());
+ 
+     // Test altering the table
+     Table t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
+     t.getParameters().put("test", "test");
+     client.alter_table(catName, dbName, tableNames[0], t);
+     t = client.getTable(catName, dbName, tableNames[0]).deepCopy();
+     Assert.assertEquals("test", t.getParameters().get("test"));
+ 
+     // Alter a table in the wrong catalog
+     try {
+       client.alter_table(DEFAULT_DATABASE_NAME, tableNames[0], t);
+       Assert.fail();
+     } catch (InvalidOperationException e) {
+       // NOP
+     }
+ 
+     // Update the metadata for the materialized view
+     CreationMetadata cm = client.getTable(catName, dbName, tableNames[3]).getCreationMetadata();
+     cm.addToTablesUsed(dbName + "." + tableNames[1]);
+     cm.unsetMaterializationTime();
+     client.updateCreationMetadata(catName, dbName, tableNames[3], cm);
+ 
+     List<String> partNames = new ArrayList<>();
+     for (String partVal : partVals) partNames.add("pcol1=" + partVal);
+     // Truncate a table
+     client.truncateTable(catName, dbName, tableNames[0], partNames);
+ 
+     // Truncate a table in the wrong catalog
+     try {
+       client.truncateTable(DEFAULT_DATABASE_NAME, tableNames[0], partNames);
+       Assert.fail();
+     } catch (NoSuchObjectException|TApplicationException e) {
+       // NOP
+     }
+ 
+     // Drop a table from the wrong catalog
+     try {
+       client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], true, false);
+       Assert.fail();
+     } catch (NoSuchObjectException|TApplicationException e) {
+       // NOP
+     }
+ 
+     // Should ignore the failure
+     client.dropTable(DEFAULT_DATABASE_NAME, tableNames[0], false, true);
+ 
+     // Have to do this in reverse order so that we drop the materialized view first.
+     for (int i = tableNames.length - 1; i >= 0; i--) {
+       t = client.getTable(catName, dbName, tableNames[i]);
+       File tableDir = new File(new URI(t.getSd().getLocation()).getPath());
+       Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
+ 
+       if (tableNames[i].equalsIgnoreCase(tableNames[0])) {
+         client.dropTable(catName, dbName, tableNames[i], false, false);
+         Assert.assertTrue(tableDir.exists() && tableDir.isDirectory());
+       } else {
+         client.dropTable(catName, dbName, tableNames[i]);
+         Assert.assertFalse(tableDir.exists());
+       }
+     }
+     Assert.assertEquals(0, client.getAllTables(catName, dbName).size());
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void createTableInBogusCatalog() throws TException {
+     new TableBuilder()
+         .setCatName("nosuch")
+         .setTableName("doomed")
+         .addCol("col1", ColumnType.STRING_TYPE_NAME)
+         .addCol("col2", ColumnType.INT_TYPE_NAME)
+         .create(client, metaStore.getConf());
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void getTableInBogusCatalog() throws TException {
+     client.getTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName());
+   }
+ 
+   @Test
+   public void getAllTablesInBogusCatalog() throws TException {
+     List<String> names = client.getAllTables("nosuch", testTables[0].getDbName());
+     Assert.assertTrue(names.isEmpty());
+   }
+ 
+   @Test(expected = UnknownDBException.class)
+   public void getTableObjectsByNameBogusCatalog() throws TException {
+     client.getTableObjectsByName("nosuch", testTables[0].getDbName(),
+         Arrays.asList(testTables[0].getTableName(), testTables[1].getTableName()));
+   }
+ 
+   @Test
+   public void getMaterializedViewsInBogusCatalog() throws TException {
+     List<String> names = client.getMaterializedViewsForRewriting("nosuch", DEFAULT_DATABASE_NAME);
+     Assert.assertTrue(names.isEmpty());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void alterTableBogusCatalog() throws TException {
+     Table t = testTables[0].deepCopy();
+     t.getParameters().put("a", "b");
+     client.alter_table("nosuch", t.getDbName(), t.getTableName(), t);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void moveTablesBetweenCatalogsOnAlter() throws TException {
+     String catName = "move_table_between_catalogs_on_alter";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "a_db";
+     // For this one don't specify a location to make sure it gets put in the catalog directory
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "non_movable_table";
+     Table before = new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("col1", ColumnType.STRING_TYPE_NAME)
+         .addCol("col2", ColumnType.INT_TYPE_NAME)
+         .create(client, metaStore.getConf());
+     Table after = before.deepCopy();
+     after.setCatName(DEFAULT_CATALOG_NAME);
+     client.alter_table(catName, dbName, tableName, after);
+ 
+   }
+ 
+   @Test
+   public void truncateTableBogusCatalog() throws TException {
+     try {
+       List<String> partNames = client.listPartitionNames(partitionedTable.getDbName(),
+           partitionedTable.getTableName(), (short) -1);
+       client.truncateTable("nosuch", partitionedTable.getDbName(), partitionedTable.getTableName(),
+           partNames);
+       Assert.fail(); // In remote mode the NoSuchObjectException gets wrapped in a
+       // TApplicationException, so either exception type is accepted below.
+     } catch (TApplicationException|NoSuchObjectException e) {
+       //NOP
+     }
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void dropTableBogusCatalog() throws TException {
+     client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false);
+   }
+ 
+   /**
+    * Creates a Table with all of the parameters set. Temporary tables are available only on the
+    * HS2 server, so the temporary flag is not used here.
+    * @return The Table object
+    */
+   private Table getTableWithAllParametersSet() throws MetaException {
+     return new TableBuilder()
+                .setDbName(DEFAULT_DATABASE)
+                .setTableName("test_table_with_all_parameters_set")
+                .setCreateTime(100)
+                .setOwnerType(PrincipalType.ROLE)
+                .setOwner("owner")
+                .setLastAccessTime(200)
+                .addPartCol("part_col", "int", "part col comment")
+                .addCol("test_col", "int", "test col comment")
+                .addCol("test_bucket_col", "int", "test bucket col comment")
+                .addCol("test_skewed_col", "int", "test skewed col comment")
+                .addCol("test_sort_col", "int", "test sort col comment")
+                .addBucketCol("test_bucket_col")
+                .addSkewedColName("test_skewed_col")
+                .addSortCol("test_sort_col", 1)
+                .setCompressed(true)
+                .setInputFormat("inputFormat")
+                .setOutputFormat("outputFormat")
+                .setLocation(metaStore.getWarehouseRoot() + "/location")
+                .setNumBuckets(4)
+                .setRetention(30000)
+                .setRewriteEnabled(true)
+                .setType("VIEW")
+                .setViewExpandedText("viewExpandedText")
+                .setViewOriginalText("viewOriginalText")
+                .setSerdeLib("serdelib")
+                .setSerdeName("serdename")
+                .setStoredAsSubDirectories(true)
+                .addSerdeParam("serdeParam", "serdeParamValue")
+                .addTableParam("tableParam", "tableParamValue")
+                .addStorageDescriptorParam("sdParam", "sdParamValue")
+                .build(metaStore.getConf());
+   }
+ }
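
For reference, below is a minimal, self-contained sketch of the catalog-scoped client calls that the test above exercises. It assumes a metastore reachable through the default configuration; the class name and the "my_cat"/"my_db"/"my_tbl" identifiers are placeholders for illustration and are not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    import java.util.Collections;

    public class CatalogScopedTableOps {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        IMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
          // Fetch a table addressed by (catalog, database, table), tweak a parameter, alter it.
          Table t = client.getTable("my_cat", "my_db", "my_tbl").deepCopy();
          t.getParameters().put("test", "test");
          client.alter_table("my_cat", "my_db", "my_tbl", t);

          // Truncate one partition of the table, then drop the table in the same catalog.
          client.truncateTable("my_cat", "my_db", "my_tbl",
              Collections.singletonList("pcol1=a"));
          client.dropTable("my_cat", "my_db", "my_tbl");
        } finally {
          client.close();
        }
      }
    }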

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/pom.xml
----------------------------------------------------------------------


[47/50] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - fix one more out (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch - fix one more out  (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/9f9ae73c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/9f9ae73c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/9f9ae73c

Branch: refs/heads/master
Commit: 9f9ae73cc2f02bcf71a90388fa8ac0d2ca5e28af
Parents: 8047dd8
Author: sergey <se...@apache.org>
Authored: Mon Jul 23 15:22:07 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Mon Jul 23 15:22:07 2018 -0700

----------------------------------------------------------------------
 .../clientpositive/llap/acid_no_buckets.q.out   | 46 ++------------------
 1 file changed, 4 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/9f9ae73c/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
index 9762de6..4cfb1d9 100644
--- a/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_no_buckets.q.out
@@ -269,12 +269,13 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	9                   
 	numPartitions       	4                   
 	numRows             	2003                
 	rawDataSize         	0                   
-	totalSize           	17988               
+	totalSize           	18013               
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -380,12 +381,13 @@ Retention:          	0
 #### A masked pattern was here ####
 Table Type:         	MANAGED_TABLE       	 
 Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
 	bucketing_version   	2                   
 	numFiles            	9                   
 	numPartitions       	4                   
 	numRows             	2003                
 	rawDataSize         	0                   
-	totalSize           	17988               
+	totalSize           	18013               
 	transactional       	true                
 	transactional_properties	default             
 #### A masked pattern was here ####
@@ -538,18 +540,10 @@ POSTHOOK: Input: default@srcpart_acid@ds=2008-04-09/hr=12
 PREHOOK: query: select count(*) from srcpart_acid
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acid
-PREHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_acid@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_acid@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acid
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acid
-POSTHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_acid@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_acid@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1990
 PREHOOK: query: merge into srcpart_acid t using (select distinct ds, hr, key, value from srcpart_acid) s
@@ -597,12 +591,10 @@ POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acid)t.FieldSchema(n
 PREHOOK: query: select count(*) from srcpart_acid where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acid
-PREHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acid where ds='2008-04-08' and hr=='12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acid
-POSTHOOK: Input: default@srcpart_acid@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0
 PREHOOK: query: select ds, hr, key, value from srcpart_acid where value like '%updated by merge'
@@ -1057,18 +1049,10 @@ POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-09/hr=12
 PREHOOK: query: select count(*) from srcpart_acidb
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidb
-PREHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_acidb@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_acidb@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidb
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidb
-POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1990
 PREHOOK: query: merge into srcpart_acidb t using (select distinct ds, hr, key, value from srcpart_acidb) s
@@ -1116,12 +1100,10 @@ POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidb)t.FieldSchema(
 PREHOOK: query: select count(*) from srcpart_acidb where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidb
-PREHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidb where ds='2008-04-08' and hr=='12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidb
-POSTHOOK: Input: default@srcpart_acidb@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0
 PREHOOK: query: select ds, hr, key, value from srcpart_acidb where value like '%updated by merge'
@@ -1606,18 +1588,10 @@ POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-09/hr=12
 PREHOOK: query: select count(*) from srcpart_acidv
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidv
-PREHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_acidv@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_acidv@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidv
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidv
-POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1990
 PREHOOK: query: explain vectorization only detail
@@ -1918,12 +1892,10 @@ POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidv)t.FieldSchema(
 PREHOOK: query: select count(*) from srcpart_acidv where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidv
-PREHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidv where ds='2008-04-08' and hr=='12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidv
-POSTHOOK: Input: default@srcpart_acidv@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0
 PREHOOK: query: select ds, hr, key, value from srcpart_acidv where value like '%updated by merge'
@@ -2408,18 +2380,10 @@ POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-09/hr=12
 PREHOOK: query: select count(*) from srcpart_acidvb
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidvb
-PREHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=11
-PREHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=12
-PREHOOK: Input: default@srcpart_acidvb@ds=2008-04-09/hr=11
-PREHOOK: Input: default@srcpart_acidvb@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidvb
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidvb
-POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=11
-POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=12
-POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-09/hr=11
-POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-09/hr=12
 #### A masked pattern was here ####
 1990
 PREHOOK: query: explain vectorization only detail
@@ -2729,12 +2693,10 @@ POSTHOOK: Lineage: merge_tmp_table.val EXPRESSION [(srcpart_acidvb)t.FieldSchema
 PREHOOK: query: select count(*) from srcpart_acidvb where ds='2008-04-08' and hr=='12'
 PREHOOK: type: QUERY
 PREHOOK: Input: default@srcpart_acidvb
-PREHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 POSTHOOK: query: select count(*) from srcpart_acidvb where ds='2008-04-08' and hr=='12'
 POSTHOOK: type: QUERY
 POSTHOOK: Input: default@srcpart_acidvb
-POSTHOOK: Input: default@srcpart_acidvb@ds=2008-04-08/hr=12
 #### A masked pattern was here ####
 0
 PREHOOK: query: select ds, hr, key, value from srcpart_acidvb where value like '%updated by merge'


[35/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0720

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0720


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/bdd3cec1
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/bdd3cec1
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/bdd3cec1

Branch: refs/heads/master
Commit: bdd3cec1f68178aa2128ee6db7fbc0a0c0bb021f
Parents: cdb32a7 06a4f98
Author: sergey <se...@apache.org>
Authored: Fri Jul 20 12:24:26 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Fri Jul 20 12:24:26 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |  10 ++
 ql/src/test/queries/clientpositive/interval_3.q |  23 ++++
 .../results/clientpositive/interval_3.q.out     | 111 +++++++++++++++++++
 3 files changed, 144 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/bdd3cec1/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
----------------------------------------------------------------------


[18/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
index 0000000,4a97f89..267c9e8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
@@@ -1,0 -1,155 +1,162 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MPartition {
+ 
+   private String partitionName; // partitionname ==>  (key=value/)*(key=value)
+   private MTable table; 
+   private List<String> values;
+   private int createTime;
+   private int lastAccessTime;
+   private MStorageDescriptor sd;
+   private Map<String, String> parameters;
 -  
++  private long writeId;
+   
+   public MPartition() {}
+   
+   /**
+    * @param partitionName partition name, in (key=value/)*(key=value) form
+    * @param table table this partition belongs to
+    * @param values partition key values
+    * @param createTime creation time of the partition
+    * @param lastAccessTime last access time of the partition
+    * @param sd storage descriptor of the partition
+    * @param parameters partition parameters
+    */
+   public MPartition(String partitionName, MTable table, List<String> values, int createTime,
+       int lastAccessTime, MStorageDescriptor sd, Map<String, String> parameters) {
+     this.partitionName = partitionName;
+     this.table = table;
+     this.values = values;
+     this.createTime = createTime;
+     this.lastAccessTime = lastAccessTime;
+     this.sd = sd;
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the values
+    */
+   public List<String> getValues() {
+     return values;
+   }
+ 
+   /**
+    * @param values the values to set
+    */
+   public void setValues(List<String> values) {
+     this.values = values;
+   }
+ 
+   /**
+    * @return the table
+    */
+   public MTable getTable() {
+     return table;
+   }
+ 
+   /**
+    * @param table the table to set
+    */
+   public void setTable(MTable table) {
+     this.table = table;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the partitionName
+    */
+   public String getPartitionName() {
+     return partitionName;
+   }
+ 
+   /**
+    * @param partitionName the partitionName to set
+    */
+   public void setPartitionName(String partitionName) {
+     this.partitionName = partitionName;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index 0000000,38ad479..deeb971
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@@ -1,0 -1,273 +1,283 @@@
++
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MTable {
+   
+   private String tableName;
+   private MDatabase database;
+   private MStorageDescriptor sd;
+   private String owner;
+   private String ownerType;
+   private int createTime;
+   private int lastAccessTime;
+   private int retention;
+   private List<MFieldSchema> partitionKeys;
+   private Map<String, String> parameters;
+   private String viewOriginalText;
+   private String viewExpandedText;
+   private boolean rewriteEnabled;
+   private String tableType;
++  private long writeId;
+ 
+   public MTable() {}
+ 
+   /**
+    * @param tableName name of the table
+    * @param database database the table belongs to
+    * @param sd storage descriptor of the table
+    * @param owner owner of the table
+    * @param ownerType principal type of the owner
+    * @param createTime creation time of the table
+    * @param lastAccessTime last access time of the table
+    * @param retention retention period
+    * @param partitionKeys partition key schemas
+    * @param parameters table parameters
+    * @param viewOriginalText original view text, or null if this table is not a view
+    * @param viewExpandedText expanded view text, or null if this table is not a view
+    * @param rewriteEnabled whether the view can be used for rewriting queries
+    * @param tableType type of the table
+    */
+   public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, String ownerType,
+       int createTime, int lastAccessTime, int retention, List<MFieldSchema> partitionKeys,
+       Map<String, String> parameters, String viewOriginalText, String viewExpandedText,
+       boolean rewriteEnabled, String tableType) {
+     this.tableName = tableName;
+     this.database = database;
+     this.sd = sd;
+     this.owner = owner;
+     this.ownerType = ownerType;
+     this.createTime = createTime;
+     this.setLastAccessTime(lastAccessTime);
+     this.retention = retention;
+     this.partitionKeys = partitionKeys;
+     this.parameters = parameters;
+     this.viewOriginalText = viewOriginalText;
+     this.viewExpandedText = viewExpandedText;
+     this.rewriteEnabled = rewriteEnabled;
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableName
+    */
+   public String getTableName() {
+     return tableName;
+   }
+ 
+   /**
+    * @param tableName the tableName to set
+    */
+   public void setTableName(String tableName) {
+     this.tableName = tableName;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the partKeys
+    */
+   public List<MFieldSchema> getPartitionKeys() {
+     return partitionKeys;
+   }
+ 
+   /**
+    * @param partKeys the partKeys to set
+    */
+   public void setPartitionKeys(List<MFieldSchema> partKeys) {
+     this.partitionKeys = partKeys;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the original view text, or null if this table is not a view
+    */
+   public String getViewOriginalText() {
+     return viewOriginalText;
+   }
+ 
+   /**
+    * @param viewOriginalText the original view text to set
+    */
+   public void setViewOriginalText(String viewOriginalText) {
+     this.viewOriginalText = viewOriginalText;
+   }
+ 
+   /**
+    * @return the expanded view text, or null if this table is not a view
+    */
+   public String getViewExpandedText() {
+     return viewExpandedText;
+   }
+ 
+   /**
+    * @param viewExpandedText the expanded view text to set
+    */
+   public void setViewExpandedText(String viewExpandedText) {
+     this.viewExpandedText = viewExpandedText;
+   }
+ 
+   /**
+    * @return whether the view can be used for rewriting queries
+    */
+   public boolean isRewriteEnabled() {
+     return rewriteEnabled;
+   }
+ 
+   /**
+    * @param rewriteEnabled whether the view can be used for rewriting queries
+    */
+   public void setRewriteEnabled(boolean rewriteEnabled) {
+     this.rewriteEnabled = rewriteEnabled;
+   }
+ 
+   /**
+    * @return the owner
+    */
+   public String getOwner() {
+     return owner;
+   }
+ 
+   /**
+    * @param owner the owner to set
+    */
+   public void setOwner(String owner) {
+     this.owner = owner;
+   }
+ 
+   /**
+    * @return the owner type
+    */
+   public String getOwnerType() {
+     return ownerType;
+   }
+ 
+   /**
+    * @param ownerType the owner type to set
+    */
+   public void setOwnerType(String ownerType) {
+     this.ownerType = ownerType;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
+   /**
+    * @return the database
+    */
+   public MDatabase getDatabase() {
+     return database;
+   }
+ 
+   /**
+    * @param database the database to set
+    */
+   public void setDatabase(MDatabase database) {
+     this.database = database;
+   }
+ 
+   /**
+    * @return the retention
+    */
+   public int getRetention() {
+     return retention;
+   }
+ 
+   /**
+    * @param retention the retention to set
+    */
+   public void setRetention(int retention) {
+     this.retention = retention;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param tableType the tableType to set
+    */
+   public void setTableType(String tableType) {
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableType
+    */
+   public String getTableType() {
+     return tableType;
+   }
++
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }
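
The diffs above add a writeId field, with plain accessors, to both MPartition and MTable. A minimal sketch of the new accessors follows; the class name and the literal 42 are placeholders for illustration and are not part of the patch.

    import org.apache.hadoop.hive.metastore.model.MPartition;
    import org.apache.hadoop.hive.metastore.model.MTable;

    public class WriteIdAccessorExample {
      public static void main(String[] args) {
        // Both model classes now carry a write ID alongside their existing fields.
        MTable table = new MTable();                 // writeId defaults to 0 until set
        table.setWriteId(42L);                       // 42 is an arbitrary placeholder value
        MPartition partition = new MPartition();
        partition.setWriteId(table.getWriteId());
        System.out.println(partition.getWriteId());  // prints 42
      }
    }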

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 0000000,4e3068d..1f559e9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@@ -1,0 -1,1107 +1,1158 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
++import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.sql.Connection;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ 
+ /**
+  * Extends the transaction handler with methods needed only by the compactor threads.  These
+  * methods are not available through the thrift interface.
+  */
+ class CompactionTxnHandler extends TxnHandler {
+   static final private String CLASS_NAME = CompactionTxnHandler.class.getName();
+   static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+ 
+   public CompactionTxnHandler() {
+   }
+ 
+   /**
+    * This will look through the completed_txn_components table for partitions or tables
+    * that may be ready for compaction.  Also, look through the txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted transactions to allow before marking this as a
+    *                   potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
+     Connection dbConn = null;
+     Set<CompactionInfo> response = new HashSet<>();
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         // Check for completed transactions
+         String s = "select distinct ctc_database, ctc_table, " +
+           "ctc_partition from COMPLETED_TXN_COMPONENTS";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           response.add(info);
+         }
+         rs.close();
+ 
+         // Check for aborted txns
+         s = "select tc_database, tc_table, tc_partition " +
+           "from TXNS, TXN_COMPONENTS " +
+           "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " +
+           "group by tc_database, tc_table, tc_partition " +
+           "having count(*) > " + maxAborted;
+ 
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           info.tooManyAborts = true;
+           response.add(info);
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+       } catch (SQLException e) {
+         LOG.error("Unable to connect to transaction database " + e.getMessage());
+         checkRetryable(dbConn, e, "findPotentialCompactions(maxAborted:" + maxAborted + ")");
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+       return response;
+     }
+     catch (RetryException e) {
+       return findPotentialCompactions(maxAborted);
+     }
+   }
+ 
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setRunAs(long cq_id, String user) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_run_as = '" + user + "' where cq_id = " + cq_id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_run_as=" + user + " for compaction record with cq_id=" + cq_id + ".  updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setRunAs(cq_id:" + cq_id + ",user:" + user +")");
+       } finally {
+         closeDbConn(dbConn);
+         closeStmt(stmt);
+       }
+     } catch (RetryException e) {
+       setRunAs(cq_id, user);
+     }
+   }
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public CompactionInfo findNextToCompact(String workerId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       //need a separate stmt for executeUpdate() otherwise it will close the ResultSet(HIVE-12725)
+       Statement updStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, " +
+           "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           LOG.debug("No compactions found ready to compact");
+           dbConn.rollback();
+           return null;
+         }
+         updStmt = dbConn.createStatement();
+         do {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+           info.properties = rs.getString(6);
+           // Now, update this record as being worked on by this worker.
+           long now = getDbTime(dbConn);
+           s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
+             "cq_start = " + now + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id +
+             " AND cq_state='" + INITIATED_STATE + "'";
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCount = updStmt.executeUpdate(s);
+           if(updCount == 1) {
+             dbConn.commit();
+             return info;
+           }
+           if(updCount == 0) {
+             LOG.debug("Another Worker picked up " + info);
+             continue;
+           }
+           LOG.error("Unable to set to cq_state=" + WORKING_STATE + " for compaction record: " +
+             info + ". updCnt=" + updCount + ".");
+           dbConn.rollback();
+           return null;
+         } while( rs.next());
+         dbConn.rollback();
+         return null;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for compaction, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findNextToCompact(workerId:" + workerId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(updStmt);
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findNextToCompact(workerId);
+     }
+   }
+ 
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void markCompacted(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_state = '" + READY_FOR_CLEANING + "', " +
+           "cq_worker_id = null where cq_id = " + info.id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_state=" + READY_FOR_CLEANING + " for compaction record: " + info + ". updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCompacted(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       markCompacted(info);
+     }
+   }
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<CompactionInfo> findReadyToClean() throws MetaException {
+     Connection dbConn = null;
+     List<CompactionInfo> rc = new ArrayList<>();
+ 
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, "
+                 + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '"
+                 + READY_FOR_CLEANING + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           switch (rs.getString(5).charAt(0)) {
+             case MAJOR_TYPE: info.type = CompactionType.MAJOR; break;
+             case MINOR_TYPE: info.type = CompactionType.MINOR; break;
+             default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
+           }
+           info.runAs = rs.getString(6);
+           info.highestWriteId = rs.getLong(7);
+           rc.add(info);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return rc;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for cleaning, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findReadyToClean");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findReadyToClean();
+     }
+   }
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markCleaned(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, info.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           info = CompactionInfo.loadFullFromCompactionQueue(rs);
+         }
+         else {
+           throw new IllegalStateException("No record with CQ_ID=" + info.id + " found in COMPACTION_QUEUE");
+         }
+         close(rs);
+         String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setLong(1, info.id);
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCount = pStmt.executeUpdate();
+         if (updCount != 1) {
+           LOG.error("Unable to delete compaction record: " + info +  ".  Update count=" + updCount);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         info.state = SUCCEEDED_STATE;
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
+         updCount = pStmt.executeUpdate();
+ 
+         // Remove entries from completed_txn_components as well, so we don't start looking there
+         // again, but only up to the highest write ID included in this compaction job.
+         //highestWriteId will be NULL in upgrade scenarios
+         s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " +
+             "ctc_table = ?";
+         if (info.partName != null) {
+           s += " and ctc_partition = ?";
+         }
+         if(info.highestWriteId != 0) {
+           s += " and ctc_writeid <= ?";
+         }
+         pStmt = dbConn.prepareStatement(s);
+         int paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         LOG.debug("Going to execute update <" + s + ">");
+         if (pStmt.executeUpdate() < 1) {
+           LOG.error("Expected to remove at least one row from completed_txn_components when " +
+             "marking compaction entry as clean!");
+         }
+ 
+         s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" +
+           TXN_ABORTED + "' and tc_database = ? and tc_table = ?";
+         if (info.highestWriteId != 0) s += " and tc_writeid <= ?";
+         if (info.partName != null) s += " and tc_partition = ?";
+ 
+         pStmt = dbConn.prepareStatement(s);
+         paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+ 
+         LOG.debug("Going to execute update <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<Long> txnids = new ArrayList<>();
+         List<String> questions = new ArrayList<>();
+         while (rs.next()) {
+           long id = rs.getLong(1);
+           txnids.add(id);
+           questions.add("?");
+         }
+         // Remove entries from txn_components, as there may be aborted txn components
+         if (txnids.size() > 0) {
+           List<String> queries = new ArrayList<>();
+ 
+           // Prepare prefix and suffix
+           StringBuilder prefix = new StringBuilder();
+           StringBuilder suffix = new StringBuilder();
+ 
+           prefix.append("delete from TXN_COMPONENTS where ");
+ 
+           //because 1 txn may include different partitions/tables even in auto commit mode
+           suffix.append(" and tc_database = ?");
+           suffix.append(" and tc_table = ?");
+           if (info.partName != null) {
+             suffix.append(" and tc_partition = ?");
+           }
+ 
+           // Populate the complete query with provided prefix and suffix
+           List<Integer> counts = TxnUtils
+               .buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "tc_txnid",
+                   true, false);
+           int totalCount = 0;
+           for (int i = 0; i < queries.size(); i++) {
+             String query = queries.get(i);
+             int insertCount = counts.get(i);
+ 
+             LOG.debug("Going to execute update <" + query + ">");
+             pStmt = dbConn.prepareStatement(query);
+             for (int j = 0; j < insertCount; j++) {
+               pStmt.setLong(j + 1, txnids.get(totalCount + j));
+             }
+             totalCount += insertCount;
+             paramCount = insertCount + 1;
+             pStmt.setString(paramCount++, info.dbname);
+             pStmt.setString(paramCount++, info.tableName);
+             if (info.partName != null) {
+               pStmt.setString(paramCount++, info.partName);
+             }
+             int rc = pStmt.executeUpdate();
+             LOG.debug("Removed " + rc + " records from txn_components");
+ 
+             // Don't bother cleaning from the txns table.  A separate call will do that.  We don't
+             // know here which txns still have components from other tables or partitions in the
+             // table, so we don't know which ones we can and cannot clean.
+           }
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCleaned(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markCleaned(info);
+     }
+   }
+ 
+   /**
+    * Clean up entries from the TXN_TO_WRITE_ID table that are below min_uncommitted_txnid, computed as
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(aborted TXNS.txn_id)).
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanTxnToWriteIdTable() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+ 
+       try {
+         // We query for minimum values in all the queries and they can only increase by any concurrent
+         // operations. So, READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // First need to find the min_uncommitted_txnid which is currently seen by any open transactions.
+         // If there are no txns which are currently open or aborted in the system, then current value of
+         // NEXT_TXN_ID.ntxn_next could be min_uncommitted_txnid.
+         String s = "select ntxn_next from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+                   "initialized, no record found in next_txn_id");
+         }
+         long minUncommittedTxnId = rs.getLong(1);
+ 
+         // If there are any open txns, then the minimum of min_open_txnid from MIN_HISTORY_LEVEL table
+         // could be the min_uncommitted_txnid if lesser than NEXT_TXN_ID.ntxn_next.
+         s = "select min(mhl_min_open_txnid) from MIN_HISTORY_LEVEL";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minOpenTxnId = rs.getLong(1);
+           if (minOpenTxnId > 0) {
+             minUncommittedTxnId = Math.min(minOpenTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // If there are aborted txns, then the minimum aborted txnid could be the min_uncommitted_txnid
+         // if lesser than both NEXT_TXN_ID.ntxn_next and min(MIN_HISTORY_LEVEL .mhl_min_open_txnid).
+         s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_ABORTED);
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minAbortedTxnId = rs.getLong(1);
+           if (minAbortedTxnId > 0) {
+             minUncommittedTxnId = Math.min(minAbortedTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // As all txns below min_uncommitted_txnid are either committed or empty_aborted, we are allowed
+         // to clean up the entries below min_uncommitted_txnid from the TXN_TO_WRITE_ID table.
+         s = "delete from TXN_TO_WRITE_ID where t2w_txnid < " + minUncommittedTxnId;
+         LOG.debug("Going to execute delete <" + s + ">");
+         int rc = stmt.executeUpdate(s);
+         LOG.info("Removed " + rc + " rows from TXN_TO_WRITE_ID with Txn Low-Water-Mark: " + minUncommittedTxnId);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanTxnToWriteIdTable");
+         throw new MetaException("Unable to connect to transaction database " +
+                 StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanTxnToWriteIdTable();
+     }
+   }
+ 
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components. The reason such
+    * txns exist can be that no work was done in this txn (e.g. Streaming opened a TransactionBatch and
+    * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanEmptyAbortedTxns() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         //Aborted is a terminal state, so nothing about the txn can change
+         //after that, so READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select txn_id from TXNS where " +
 -          "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
 -          "txn_state = '" + TXN_ABORTED + "'";
++            "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
++            "txn_state = '" + TXN_ABORTED + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         List<Long> txnids = new ArrayList<>();
+         while (rs.next()) txnids.add(rs.getLong(1));
+         close(rs);
+         if(txnids.size() <= 0) {
+           return;
+         }
+         Collections.sort(txnids);//easier to read logs
++
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
++        // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS
++        prefix.append("select tbl_id from TBLS inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from TABLE_PARAMS for the txnids.
++        List<StringBuilder> finalCommands = new ArrayList<>(queries.size());
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " +
++                  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and tbl_id in ("));
++          finalCommands.get(i).append(query + ")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from TBLS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete COLUMN_STATS_ACCURATE.BASIC_STATS rows from PARTITION_PARAMS for the txnids.
++        prefix.append("select part_id from PARTITIONS "
++            + "inner join TBLS on PARTITIONS.TBL_ID = TBLS.TBL_ID "
++            + "inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " +
++                  " where param_key = '" + "COLUMN_STATS_ACCURATE" + "' and part_id in ("));
++          finalCommands.get(i).append(query + ")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Turned off " + rc + " COLUMN_STATS_ACCURATE.BASIC_STATS states from PARTITIONS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete from TXNS.
+         prefix.append("delete from TXNS where ");
+         suffix.append("");
+ 
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", false, false);
+ 
+         for (String query : queries) {
+           LOG.debug("Going to execute update <" + query + ">");
+           int rc = stmt.executeUpdate(query);
+           LOG.info("Removed " + rc + " empty Aborted transactions from TXNS");
+         }
+         LOG.info("Aborted transactions removed from TXNS: " + txnids);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanEmptyAbortedTxns");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanEmptyAbortedTxns();
+     }
+   }
+ 
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to the INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shut down.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE 'hostname%' pattern will match the worker id.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeFromLocalWorkers(String hostname) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_worker_id like '"
+           +  hostname + "%'";
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the local workers could have died
+         // with  nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeFromLocalWorkers(hostname:" + hostname +")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeFromLocalWorkers(hostname);
+     }
+   }
+ 
+   /**
+    * This call will return all compaction queue entries that are assigned to a worker
+    * but have exceeded the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeTimedoutWorkers(long timeout) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         long latestValidStart = getDbTime(dbConn) - timeout;
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_start < "
+           +  latestValidStart;
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the workers could have died
+         // with nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeTimedoutWorkers(timeout);
+     }
+   }
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         String quote = getIdentifierQuoteString(dbConn);
+         StringBuilder bldr = new StringBuilder();
+         bldr.append("SELECT ").append(quote).append("COLUMN_NAME").append(quote)
+           .append(" FROM ")
+           .append(quote).append((ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS"))
+           .append(quote)
+           .append(" WHERE ")
+           .append(quote).append("DB_NAME").append(quote).append(" = ?")
+           .append(" AND ").append(quote).append("TABLE_NAME").append(quote)
+           .append(" = ?");
+         if (ci.partName != null) {
+           bldr.append(" AND ").append(quote).append("PARTITION_NAME").append(quote).append(" = ?");
+         }
+         String s = bldr.toString();
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+ 
+       /*String s = "SELECT COLUMN_NAME FROM " + (ci.partName == null ? "TAB_COL_STATS" :
+           "PART_COL_STATS")
+          + " WHERE DB_NAME='" + ci.dbname + "' AND TABLE_NAME='" + ci.tableName + "'"
+         + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");*/
+         LOG.debug("Going to execute <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<String> columns = new ArrayList<>();
+         while (rs.next()) {
+           columns.add(rs.getString(1));
+         }
+         LOG.debug("Found columns to update stats: " + columns + " on " + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName));
+         dbConn.commit();
+         return columns;
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findColumnsWithStats(" + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName) + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       return findColumnsWithStats(ci);
+     }
+   }
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    * It corresponds to the highest resolved txn id, i.e. the highest txn id such that there are no open txns with lower ids.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_WRITE_ID = " + highestWriteId +
+           " WHERE CQ_ID = " + ci.id);
+         if(updCount != 1) {
+           throw new IllegalStateException("Could not find record in COMPACTION_QUEUE for " + ci);
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setCompactionHighestWriteId(" + ci + "," + highestWriteId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       setCompactionHighestWriteId(ci, highestWriteId);
+     }
+   }
+   private static class RetentionCounters {
+     int attemptedRetention = 0;
+     int failedRetention = 0;
+     int succeededRetention = 0;
+     RetentionCounters(int attemptedRetention, int failedRetention, int succeededRetention) {
+       this.attemptedRetention = attemptedRetention;
+       this.failedRetention = failedRetention;
+       this.succeededRetention = succeededRetention;
+     }
+   }
+   private void checkForDeletion(List<Long> deleteSet, CompactionInfo ci, RetentionCounters rc) {
+     switch (ci.state) {
+       case ATTEMPTED_STATE:
+         if(--rc.attemptedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case FAILED_STATE:
+         if(--rc.failedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case SUCCEEDED_STATE:
+         if(--rc.succeededRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       default:
+         //do nothing to handle future RU/D where we may want to add new state types
+     }
+   }
+ 
+   /**
+    * For any given compactable entity (partition; table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has the advantage of always retaining the last failure/success even if
+    * it's not recent.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void purgeCompactionHistory() throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     List<Long> deleteSet = new ArrayList<>();
+     RetentionCounters rc = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         /*cc_id is monotonically increasing, so for any entity it sorts in order of compaction history;
+         thus this query groups by entity and, within each group, sorts most recent first*/
+         rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " +
+           "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc");
+         String lastCompactedEntity = null;
+         /*In each group, walk from most recent and count occurrences of each state type.  Once you
+          * have counted enough (for each state) to satisfy the retention policy, delete all other
+          * instances of that state.*/
+         while(rs.next()) {
+           CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
+           if(!ci.getFullPartitionName().equals(lastCompactedEntity)) {
+             lastCompactedEntity = ci.getFullPartitionName();
+             rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
+               getFailedCompactionRetention(),
+               MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
+           }
+           checkForDeletion(deleteSet, ci, rc);
+         }
+         close(rs);
+ 
+         if (deleteSet.size() <= 0) {
+           return;
+         }
+ 
+         List<String> queries = new ArrayList<>();
+ 
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         prefix.append("delete from COMPLETED_COMPACTIONS where ");
+         suffix.append("");
+ 
+         List<String> questions = new ArrayList<>(deleteSet.size());
+         for (int  i = 0; i < deleteSet.size(); i++) {
+           questions.add("?");
+         }
+         List<Integer> counts = TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "cc_id", false, false);
+         int totalCount = 0;
+         for (int i = 0; i < queries.size(); i++) {
+           String query = queries.get(i);
+           long insertCount = counts.get(i);
+           LOG.debug("Going to execute update <" + query + ">");
+           pStmt = dbConn.prepareStatement(query);
+           for (int j = 0; j < insertCount; j++) {
+             pStmt.setLong(j + 1, deleteSet.get(totalCount + j));
+           }
+           totalCount += insertCount;
+           int count = pStmt.executeUpdate();
+           LOG.debug("Removed " + count + " records from COMPLETED_COMPACTIONS");
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "purgeCompactionHistory()");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         closeStmt(pStmt);
+       }
+     } catch (RetryException ex) {
+       purgeCompactionHistory();
+     }
+   }
+   /**
+    * This ensures that the number of failed compaction entries retained is at least the failed
+    * compaction threshold, past which no new compactions are scheduled for an entity.
+    */
+   private int getFailedCompactionRetention() {
+     int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+     int failedRetention = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
+     if(failedRetention < failedThreshold) {
+       LOG.warn("Invalid configuration " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED.getVarname() +
+         "=" + failedRetention + " < " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.getVarname() + "=" +
+         failedThreshold + ".  Will use " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED.getVarname() +
+         "=" + failedThreshold);
+       failedRetention = failedThreshold;
+     }
+     return failedRetention;
+   }
+   /**
+    * Returns {@code true} if there is already a sufficient number of consecutive failures for
+    * this table/partition so that no new automatic compactions will be scheduled.
+    * User initiated compactions don't do this check.
+    *
+    * Do we allow compacting a whole table (when it's partitioned)?  No, though perhaps we should.
+    * That would be a meta operation, i.e. first find all partitions for this table (which have
+    * txn info) and schedule each compaction separately.  This avoids complications in this logic.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CC_STATE from COMPLETED_COMPACTIONS where " +
+           "CC_DATABASE = ? and " +
+           "CC_TABLE = ? " +
+           (ci.partName != null ? "and CC_PARTITION = ?" : "") +
+           " and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc");
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+         rs = pStmt.executeQuery();
+         int numFailed = 0;
+         int numTotal = 0;
+         int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+         while(rs.next() && ++numTotal <= failedThreshold) {
+           if(rs.getString(1).charAt(0) == FAILED_STATE) {
+             numFailed++;
+           }
+           else {
+             numFailed--;
+           }
+         }
+         return numFailed == failedThreshold;
+       }
+       catch (SQLException e) {
+         LOG.error("Unable to check for failed compactions " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "checkFailedCompactions(" + ci + ")");
+         LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e));
+         return false;//weren't able to check
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return checkFailedCompactions(ci);
+     }
+   }
+   /**
+    * If there is an entry in compaction_queue with ci.id, remove it and make an entry in
+    * completed_compactions with status 'f'.
+    * If there is no entry in compaction_queue, it means Initiator failed to even schedule a compaction,
+    * which we record as ATTEMPTED_STATE entry in history.
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markFailed(CompactionInfo ci) throws MetaException {//todo: this should not throw
+     //todo: this should take "comment" as parameter to set in CC_META_INFO to provide some context for the failure
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, ci.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           ci = CompactionInfo.loadFullFromCompactionQueue(rs);
+           String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+           pStmt = dbConn.prepareStatement(s);
+           pStmt.setLong(1, ci.id);
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCnt = pStmt.executeUpdate();
+         }
+         else {
+           if(ci.id > 0) {
+             //the record with valid CQ_ID has disappeared - this is a sign of something wrong
+             throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
+           }
+         }
+         if(ci.id == 0) {
+           //The failure occurred before we even made an entry in COMPACTION_QUEUE
+           //generate ID so that we can make an entry in COMPLETED_COMPACTIONS
+           ci.id = generateCompactionQueueId(stmt);
+           //mostly this indicates that the Initiator is paying attention to some table even though
+           //compactions are not happening.
+           ci.state = ATTEMPTED_STATE;
+           //this is not strictly accurate, but 'type' cannot be null.
+           if(ci.type == null) { ci.type = CompactionType.MINOR; }
+           ci.start = getDbTime(dbConn);
+         }
+         else {
+           ci.state = FAILED_STATE;
+         }
+         close(rs, stmt, null);
+         closeStmt(pStmt);
+ 
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
+         int updCount = pStmt.executeUpdate();
+         LOG.debug("Going to commit");
+         closeStmt(pStmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("markFailed(" + ci.id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "markFailed(" + ci + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(rs, stmt, null);
+         close(null, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markFailed(ci);
+     }
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void setHadoopJobId(String hadoopJobId, long id) {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set CQ_HADOOP_JOB_ID = " + quoteString(hadoopJobId) + " WHERE CQ_ID = " + id;
+         LOG.debug("Going to execute <" + s + ">");
+         int updateCount = stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         closeStmt(stmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("setHadoopJobId(" + hadoopJobId + "," + id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "setHadoopJobId(" + hadoopJobId + "," + id + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("setHadoopJobId(" + hadoopJobId + "," + id + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       setHadoopJobId(hadoopJobId, id);
+     }
+   }
+ }
+ 
+ 
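
For context on the query batching used in cleanEmptyAbortedTxns() above: TxnUtils.buildQueryWithINClause splits a potentially very long list of txn ids across multiple IN (...) clauses so that no single SQL statement exceeds what the backing database can handle. A rough standalone sketch of that idea follows; it is not the actual TxnUtils code, and the 1000-element batch size and class name are illustrative assumptions only (the real helper derives its limits from MetastoreConf settings).

import java.util.ArrayList;
import java.util.List;

public class InClauseBatcherSketch {
  // Assumed batch size; illustrative only.
  private static final int MAX_IN_CLAUSE_ELEMENTS = 1000;

  /** Builds "delete from TXNS where txn_id in (...)" statements in batches. */
  public static List<String> buildDeleteStatements(List<Long> txnIds) {
    List<String> queries = new ArrayList<>();
    for (int start = 0; start < txnIds.size(); start += MAX_IN_CLAUSE_ELEMENTS) {
      int end = Math.min(start + MAX_IN_CLAUSE_ELEMENTS, txnIds.size());
      StringBuilder sb = new StringBuilder("delete from TXNS where txn_id in (");
      for (int i = start; i < end; i++) {
        if (i > start) {
          sb.append(',');
        }
        sb.append(txnIds.get(i));
      }
      sb.append(')');
      queries.add(sb.toString());
    }
    return queries;
  }
}

Each returned statement can then be executed with a plain Statement, mirroring the executeUpdate loop in the patch above.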
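
The retention scheme described in the purgeCompactionHistory() javadoc can be illustrated with a small self-contained walk-through. The retention values below (2 succeeded, 1 failed, 1 attempted) are made-up numbers for the example, not Hive defaults, and the class is purely illustrative.

import java.util.ArrayList;
import java.util.List;

public class RetentionSketch {
  public static void main(String[] args) {
    // Compaction history for one partition, most recent first:
    // s = succeeded, f = failed, a = attempted.
    char[] history = "ssfsffaa".toCharArray();
    int keepSucceeded = 2, keepFailed = 1, keepAttempted = 1;  // assumed retention settings
    List<Integer> purge = new ArrayList<>();                   // positions to delete
    for (int i = 0; i < history.length; i++) {
      switch (history[i]) {
        case 's': if (--keepSucceeded < 0) purge.add(i); break;
        case 'f': if (--keepFailed < 0) purge.add(i); break;
        case 'a': if (--keepAttempted < 0) purge.add(i); break;
        default: break; // unknown states are retained
      }
    }
    // Keeps the two most recent successes, the most recent failure and attempt; purges the rest.
    System.out.println("Purge history entries at positions: " + purge);  // [3, 4, 5, 7]
  }
}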


[44/50] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - fix build (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch - fix build (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e8d7cdcc
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e8d7cdcc
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e8d7cdcc

Branch: refs/heads/master
Commit: e8d7cdcc372e14f8a0a664911b5ae6934201e30b
Parents: b17a347
Author: sergey <se...@apache.org>
Authored: Sun Jul 22 21:20:46 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sun Jul 22 21:20:46 2018 -0700

----------------------------------------------------------------------
 .../hcatalog/listener/DummyRawStoreFailEvent.java   | 16 ++++++++--------
 .../hadoop/hive/ql/stats/StatsUpdaterThread.java    |  4 ++--
 2 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/e8d7cdcc/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 1c105d1..be40395 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -321,10 +321,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void alterTable(String catName, String dbName, String name, Table newTable, String queryValidWriteIds)
+  public Table alterTable(String catName, String dbName, String name, Table newTable, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
+      return objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
@@ -385,22 +385,22 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
+  public Partition alterPartition(String catName, String dbName, String tblName, List<String> partVals,
                              Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
+      return objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
   }
 
   @Override
-  public void alterPartitions(String catName, String dbName, String tblName,
+  public List<Partition> alterPartitions(String catName, String dbName, String tblName,
                               List<List<String>> partValsList, List<Partition> newParts,
                               long writeId, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds);
+      return objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
@@ -736,13 +736,13 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
+  public Map<String, String> updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
     return objectStore.updateTableColumnStatistics(statsObj, validWriteIds, writeId);
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+  public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,
       List<String> partVals, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
     return objectStore.updatePartitionColumnStatistics(statsObj, partVals, validWriteIds, writeId);

http://git-wip-us.apache.org/repos/asf/hive/blob/e8d7cdcc/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
index f34cb61..a50ec18 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
@@ -447,7 +447,7 @@ public class StatsUpdaterThread extends Thread implements MetaStoreThread {
     }
     // TODO: we should probably skip updating if writeId is from an active txn
     boolean isTxnValid = (writeIdString == null) || ObjectStore.isCurrentStatsValidForTheQuery(
-        conf, db, tbl, params, statsWriteId , writeIdString, false);
+        conf, params, statsWriteId , writeIdString, false);
     return getExistingStatsToUpdate(existingStats, params, isTxnValid);
   }
 
@@ -472,7 +472,7 @@ public class StatsUpdaterThread extends Thread implements MetaStoreThread {
     }
     // TODO: we should probably skip updating if writeId is from an active txn
     if (writeIdString != null && !ObjectStore.isCurrentStatsValidForTheQuery(
-        conf, db, tbl, params, statsWriteId, writeIdString, false)) {
+        conf, params, statsWriteId, writeIdString, false)) {
       return allCols;
     }
     List<String> colsToUpdate = new ArrayList<>();


[40/50] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch - more tests (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ba083ed4
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ba083ed4
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ba083ed4

Branch: refs/heads/master
Commit: ba083ed43adb89b738387c900c822548e348ccd0
Parents: 5c8b5d8
Author: sergey <se...@apache.org>
Authored: Sat Jul 21 13:42:32 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sat Jul 21 13:42:32 2018 -0700

----------------------------------------------------------------------
 .../metastore/client/TestAlterPartitions.java   | 26 ++++++++++++++------
 1 file changed, 19 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ba083ed4/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index f3e0ba4..4fc3688 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -1050,14 +1050,18 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList(), partToRename);
   }
 
-  @Test(expected = InvalidOperationException.class)
+  @Test
   public void testRenamePartitionNullOldPartList() throws Exception {
     createTable4PartColsParts(client);
     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 
     Partition partToRename = oldParts.get(3);
     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
-    client.renamePartition(DB_NAME, TABLE_NAME, null, partToRename);
+    try {
+      client.renamePartition(DB_NAME, TABLE_NAME, null, partToRename);
+      Assert.fail("should throw");
+    } catch (InvalidOperationException | TProtocolException ex) {
+    }
   }
 
   @Test
@@ -1069,7 +1073,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
       Partition partToRename = oldParts.get(3);
       partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
       client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), null);
-    } catch (NullPointerException | TTransportException e) {
+    } catch (NullPointerException | TProtocolException e) {
     }
   }
 
@@ -1103,24 +1107,32 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     client.renamePartition(DB_NAME, "", oldValues.get(3), partToRename);
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testRenamePartitionNullDbName() throws Exception {
     List<List<String>> oldValues = createTable4PartColsParts(client);
     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 
     Partition partToRename = oldParts.get(3);
     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
-    client.renamePartition(null, TABLE_NAME, oldValues.get(3), partToRename);
+    try {
+      client.renamePartition(null, TABLE_NAME, oldValues.get(3), partToRename);
+      Assert.fail("should throw");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
-  @Test(expected = MetaException.class)
+  @Test
   public void testRenamePartitionNullTblName() throws Exception {
     List<List<String>> oldValues = createTable4PartColsParts(client);
     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 
     Partition partToRename = oldParts.get(3);
     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
-    client.renamePartition(DB_NAME, null, oldValues.get(3), partToRename);
+    try {
+      client.renamePartition(DB_NAME, null, oldValues.get(3), partToRename);
+      Assert.fail("should throw");
+    } catch (MetaException | TProtocolException ex) {
+    }
   }
 
   @Test(expected = MetaException.class)


[13/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 0000000,5ba71c4..878530a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@@ -1,0 -1,720 +1,720 @@@
+ -- Timestamp: 2011-09-22 15:32:02.024
+ -- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Specified schema is: APP
+ -- appendLogs: false
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for functions
+ -- ----------------------------------------------
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for tables
+ -- ----------------------------------------------
+ CREATE TABLE "APP"."DBS" (
+   "DB_ID" BIGINT NOT NULL,
+   "DESC" VARCHAR(4000),
+   "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+   "NAME" VARCHAR(128),
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   "CTLG_NAME" VARCHAR(256) NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
 -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
++CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
+ 
+ CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."PARTITION_EVENTS" (
+     "PART_NAME_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_TIME" BIGINT NOT NULL,
+     "EVENT_TYPE" INTEGER NOT NULL,
+     "PARTITION_NAME" VARCHAR(767),
+     "TBL_NAME" VARCHAR(256)
+ );
+ 
+ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+ 
 -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
++CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N',  "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+ 
+ CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."TAB_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "TBL_ID" BIGINT NOT NULL,
+     "BIT_VECTOR" BLOB
+ );
+ 
+ CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+ 
+ CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."PART_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "PARTITION_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "BIT_VECTOR" BLOB,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "PART_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+ 
+ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+ 
+ CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NOTIFICATION_LOG" (
+     "NL_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "MESSAGE" CLOB,
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE_FORMAT" VARCHAR(16)
+ );
+ 
+ CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT , "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400));
+ 
+ CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
+ 
+ CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
+ 
+ CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+ 
+ CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0);
+ 
+ CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
+ 
+ CREATE TABLE "APP"."MV_CREATION_METADATA" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "CAT_NAME" VARCHAR(256) NOT NULL,
+   "DB_NAME" VARCHAR(128) NOT NULL,
+   "TBL_NAME" VARCHAR(256) NOT NULL,
+   "TXN_LIST" CLOB,
+   "MATERIALIZATION_TIME" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."MV_TABLES_USED" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "TBL_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."CTLGS" (
+     "CTLG_ID" BIGINT NOT NULL,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL);
+ 
+ -- ----------------------------------------------
+ -- DML Statements
+ -- ----------------------------------------------
+ 
+ INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
+ 
+ INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for indexes
+ -- ----------------------------------------------
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME");
+ 
+ CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+ 
+ CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+ 
+ CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+ 
+ CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME");
+ 
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for keys
+ -- ----------------------------------------------
+ 
+ -- primary/unique
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+ 
+ ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+ 
+ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+ 
+ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
+ 
+ 
+ -- foreign
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for checks
+ -- ----------------------------------------------
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varchar(2048) for bit data,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varchar(2048) for bit data,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ -- The first 4 columns make up a PK, but since WS_PARTITION is nullable we can't declare such a PK
+ -- This is a good candidate for an index-organized table
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
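+ 
+ -- Illustrative only, not part of the schema: a write-write conflict check against this
+ -- table would typically look up committed writes to the same entity, along the lines of
+ --   SELECT WS_TXNID, WS_COMMIT_ID, WS_OPERATION_TYPE
+ --   FROM WRITE_SET
+ --   WHERE WS_DATABASE = ? AND WS_TABLE = ? AND WS_PARTITION = ?;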
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID BIGINT NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT BIGINT NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "APP"."I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" char(1) not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "APP"."SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
+ );
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE "APP"."RUNTIME_STATS" (
+   "RS_ID" bigint primary key,
+   "CREATE_TIME" integer not null,
+   "WEIGHT" integer not null,
+   "PAYLOAD" BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME integer NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index 0000000,a511376..d4fb299
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@@ -1,0 -1,6 +1,8 @@@
+ -- Upgrade MetaStore schema from 3.1.0 to 4.0.0
 -
++-- HIVE-19416
++ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0;
++ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0;
+ 
+ -- This needs to be the last thing done.  Insert any changes above this line.
+ UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 0000000,bbc8ea2..810d48a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@@ -1,0 -1,1284 +1,1284 @@@
+ -- Licensed to the Apache Software Foundation (ASF) under one or more
+ -- contributor license agreements.  See the NOTICE file distributed with
+ -- this work for additional information regarding copyright ownership.
+ -- The ASF licenses this file to You under the Apache License, Version 2.0
+ -- (the "License"); you may not use this file except in compliance with
+ -- the License.  You may obtain a copy of the License at
+ --
+ --     http://www.apache.org/licenses/LICENSE-2.0
+ --
+ -- Unless required by applicable law or agreed to in writing, software
+ -- distributed under the License is distributed on an "AS IS" BASIS,
+ -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ -- See the License for the specific language governing permissions and
+ -- limitations under the License.
+ 
+ ------------------------------------------------------------------
+ -- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+ ------------------------------------------------------------------
+ -- Complete schema required for the following classes:-
+ --     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MDatabase
+ --     org.apache.hadoop.hive.metastore.model.MDelegationToken
+ --     org.apache.hadoop.hive.metastore.model.MFieldSchema
+ --     org.apache.hadoop.hive.metastore.model.MFunction
+ --     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MIndex
+ --     org.apache.hadoop.hive.metastore.model.MMasterKey
+ --     org.apache.hadoop.hive.metastore.model.MOrder
+ --     org.apache.hadoop.hive.metastore.model.MPartition
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+ --     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MResourceUri
+ --     org.apache.hadoop.hive.metastore.model.MRole
+ --     org.apache.hadoop.hive.metastore.model.MRoleMap
+ --     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+ --     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MStringList
+ --     org.apache.hadoop.hive.metastore.model.MTable
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+ --     org.apache.hadoop.hive.metastore.model.MType
+ --     org.apache.hadoop.hive.metastore.model.MVersionTable
+ --
+ -- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID int NOT NULL,
+     MASTER_KEY nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DEFERRED_REBUILD bit NOT NULL,
+     INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+     INDEX_NAME nvarchar(128) NULL,
+     INDEX_TBL_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     ORIG_TBL_ID bigint NULL,
+     SD_ID bigint NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ CREATE TABLE PART_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     PART_ID bigint NULL,
+     PARTITION_NAME nvarchar(767) NOT NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     ROLE_NAME nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     PART_NAME nvarchar(767) NULL,
+     SD_ID bigint NULL,
 -    TBL_ID bigint NULL
 -);
++    TBL_ID bigint NULL,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ CREATE TABLE CDS
+ (
+     CD_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ CREATE TABLE VERSION
+ (
+     VER_ID bigint NOT NULL,
+     SCHEMA_VERSION nvarchar(127) NOT NULL,
+     VERSION_COMMENT nvarchar(255) NOT NULL
+ );
+ 
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     USER_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_COL_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     DB_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ CREATE TABLE TAB_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     TBL_ID bigint NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID bigint NOT NULL,
+     TYPE_NAME nvarchar(128) NULL,
+     TYPE1 nvarchar(767) NULL,
+     TYPE2 nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID bigint NOT NULL,
+     "DESC" nvarchar(4000) NULL,
+     DB_LOCATION_URI nvarchar(4000) NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     CTLG_NAME nvarchar(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_COL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT nvarchar(767) NOT NULL,
+     TOKEN nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     SLIB nvarchar(4000) NULL,
+     "DESCRIPTION" nvarchar(4000),
+     "SERIALIZER_CLASS" nvarchar(4000),
+     "DESERIALIZER_CLASS" nvarchar(4000),
+     "SERDE_TYPE" int
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+ CREATE TABLE FUNCS
+ (
+     FUNC_ID bigint NOT NULL,
+     CLASS_NAME nvarchar(4000) NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     FUNC_NAME nvarchar(128) NULL,
+     FUNC_TYPE int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID bigint NOT NULL,
+     ADD_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     ROLE_ID bigint NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     OWNER nvarchar(767) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     RETENTION int NOT NULL,
+     SD_ID bigint NULL,
+     TBL_NAME nvarchar(256) NULL,
+     TBL_TYPE nvarchar(128) NULL,
+     VIEW_EXPANDED_TEXT text NULL,
+     VIEW_ORIGINAL_TEXT text NULL,
 -    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
 -);
++    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     TBL_NAME nvarchar(256) NOT NULL,
+     TXN_LIST text NULL,
+     MATERIALIZATION_TIME bigint NOT NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME);
+ 
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     TBL_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID bigint NOT NULL,
+     CD_ID bigint NULL,
+     INPUT_FORMAT nvarchar(4000) NULL,
+     IS_COMPRESSED bit NOT NULL,
+     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+     LOCATION nvarchar(4000) NULL,
+     NUM_BUCKETS int NOT NULL,
+     OUTPUT_FORMAT nvarchar(4000) NULL,
+     SERDE_ID bigint NULL
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NULL,
+     DB_NAME nvarchar(128) NULL,
+     EVENT_TIME bigint NOT NULL,
+     EVENT_TYPE int NOT NULL,
+     PARTITION_NAME nvarchar(767) NULL,
+     TBL_NAME nvarchar(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     "ORDER" int NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_NAMES for join relationship
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID bigint NOT NULL,
+     SKEWED_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID bigint NOT NULL,
+     STRING_LIST_ID_KID bigint NOT NULL,
+     LOCATION nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ -- Table SKEWED_STRING_LIST_VALUES for join relationship
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID bigint NOT NULL,
+     STRING_LIST_VALUE nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID bigint NOT NULL,
+     PART_KEY_VAL nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID bigint NOT NULL,
+     PKEY_COMMENT nvarchar(4000) NULL,
+     PKEY_NAME nvarchar(128) NOT NULL,
+     PKEY_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table SKEWED_VALUES for join relationship
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID bigint NOT NULL,
+     STRING_LIST_ID_EID bigint NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table FUNC_RU for join relationship
+ CREATE TABLE FUNC_RU
+ (
+     FUNC_ID bigint NOT NULL,
+     RESOURCE_TYPE int NOT NULL,
+     RESOURCE_URI nvarchar(4000) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     FIELD_NAME nvarchar(128) NOT NULL,
+     FIELD_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID bigint NOT NULL,
+     BUCKET_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(180) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     TYPE_NAME varchar(max) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID bigint NOT NULL,
+     EVENT_ID bigint NOT NULL,
+     EVENT_TIME int NOT NULL,
+     EVENT_TYPE nvarchar(32) NOT NULL,
+     CAT_NAME nvarchar(128) NULL,
+     DB_NAME nvarchar(128) NULL,
+     TBL_NAME nvarchar(256) NULL,
+     MESSAGE_FORMAT nvarchar(16),
+     MESSAGE text NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID bigint NOT NULL,
+     NEXT_EVENT_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     QUERY_PARALLELISM int,
+     STATUS nvarchar(20) NOT NULL,
+     DEFAULT_POOL_ID bigint
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     PATH nvarchar(1024) NOT NULL,
+     ALLOC_FRACTION float,
+     QUERY_PARALLELISM int,
+     SCHEDULING_POLICY nvarchar(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     TRIGGER_EXPRESSION nvarchar(1024),
+     ACTION_EXPRESSION nvarchar(1024),
+     IS_IN_UNMANAGED bit NOT NULL DEFAULT 0
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID bigint NOT NULL,
+     TRIGGER_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     ENTITY_TYPE nvarchar(128) NOT NULL,
+     ENTITY_NAME nvarchar(128) NOT NULL,
+     POOL_ID bigint,
+     ORDERING int
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE TABLE CTLGS (
+       CTLG_ID bigint primary key,
+       "NAME" nvarchar(256),
+       "DESC" nvarchar(4000),
+       LOCATION_URI nvarchar(4000) not null
+ );
+ 
+ CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");
+ 
+ -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+ 
+ 
+ -- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ 
+ -- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+ 
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME");
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_NAMES
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST_VALUES
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table SKEWED_VALUES
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+ 
+ CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME");
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ -- Transaction and Lock Tables
+ -- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file.
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ CREATE TABLE COMPACTION_QUEUE(
+ 	CQ_ID bigint NOT NULL,
+ 	CQ_DATABASE nvarchar(128) NOT NULL,
+ 	CQ_TABLE nvarchar(128) NOT NULL,
+ 	CQ_PARTITION nvarchar(767) NULL,
+ 	CQ_STATE char(1) NOT NULL,
+ 	CQ_TYPE char(1) NOT NULL,
+ 	CQ_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CQ_WORKER_ID nvarchar(128) NULL,
+ 	CQ_START bigint NULL,
+ 	CQ_RUN_AS nvarchar(128) NULL,
+     CQ_HIGHEST_WRITE_ID bigint NULL,
+     CQ_META_INFO varbinary(2048) NULL,
+ 	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CQ_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+ 	CC_ID bigint NOT NULL,
+ 	CC_DATABASE nvarchar(128) NOT NULL,
+ 	CC_TABLE nvarchar(128) NOT NULL,
+ 	CC_PARTITION nvarchar(767) NULL,
+ 	CC_STATE char(1) NOT NULL,
+ 	CC_TYPE char(1) NOT NULL,
+ 	CC_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CC_WORKER_ID nvarchar(128) NULL,
+ 	CC_START bigint NULL,
+ 	CC_END bigint NULL,
+ 	CC_RUN_AS nvarchar(128) NULL,
+     CC_HIGHEST_WRITE_ID bigint NULL,
+     CC_META_INFO varbinary(2048) NULL,
+ 	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CC_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS(
+ 	CTC_TXNID bigint NOT NULL,
+ 	CTC_DATABASE nvarchar(128) NOT NULL,
+ 	CTC_TABLE nvarchar(128) NULL,
+ 	CTC_PARTITION nvarchar(767) NULL,
+     CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL,
+     CTC_WRITEID bigint,
+     CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE HIVE_LOCKS(
+ 	HL_LOCK_EXT_ID bigint NOT NULL,
+ 	HL_LOCK_INT_ID bigint NOT NULL,
+ 	HL_TXNID bigint NOT NULL,
+ 	HL_DB nvarchar(128) NOT NULL,
+ 	HL_TABLE nvarchar(128) NULL,
+ 	HL_PARTITION nvarchar(767) NULL,
+ 	HL_LOCK_STATE char(1) NOT NULL,
+ 	HL_LOCK_TYPE char(1) NOT NULL,
+ 	HL_LAST_HEARTBEAT bigint NOT NULL,
+ 	HL_ACQUIRED_AT bigint NULL,
+ 	HL_USER nvarchar(128) NOT NULL,
+ 	HL_HOST nvarchar(128) NOT NULL,
+     HL_HEARTBEAT_COUNT int NULL,
+     HL_AGENT_INFO nvarchar(128) NULL,
+     HL_BLOCKEDBY_EXT_ID bigint NULL,
+     HL_BLOCKEDBY_INT_ID bigint NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	HL_LOCK_EXT_ID ASC,
+ 	HL_LOCK_INT_ID ASC
+ )
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+ 	NCQ_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_LOCK_ID(
+ 	NL_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_TXN_ID(
+ 	NTXN_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_TXN_ID VALUES(1);
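
Note that NEXT_COMPACTION_QUEUE_ID, NEXT_LOCK_ID, and NEXT_TXN_ID above are single-row counter tables rather than native sequences. As an illustration only (the actual allocation is done by the metastore's transaction-handling code, not by this script), such a counter is typically read and advanced inside one transaction so two callers can never be handed the same ID:

-- Hedged sketch: allocate one transaction id from the single-row counter.
BEGIN TRANSACTION;
DECLARE @next_txn bigint;
SELECT @next_txn = NTXN_NEXT FROM NEXT_TXN_ID WITH (UPDLOCK, HOLDLOCK);
UPDATE NEXT_TXN_ID SET NTXN_NEXT = @next_txn + 1;
-- @next_txn can now be used as a TXN_ID when inserting into TXNS.
COMMIT TRANSACTION;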
+ 
+ CREATE TABLE TXNS(
+ 	TXN_ID bigint NOT NULL,
+ 	TXN_STATE char(1) NOT NULL,
+ 	TXN_STARTED bigint NOT NULL,
+ 	TXN_LAST_HEARTBEAT bigint NOT NULL,
+ 	TXN_USER nvarchar(128) NOT NULL,
+ 	TXN_HOST nvarchar(128) NOT NULL,
+     TXN_AGENT_INFO nvarchar(128) NULL,
+     TXN_META_INFO nvarchar(128) NULL,
+     TXN_HEARTBEAT_COUNT int NULL,
+     TXN_TYPE int NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	TXN_ID ASC
+ )
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS(
+ 	TC_TXNID bigint NOT NULL,
+ 	TC_DATABASE nvarchar(128) NOT NULL,
+ 	TC_TABLE nvarchar(128) NULL,
+ 	TC_PARTITION nvarchar(767) NULL,
+     TC_OPERATION_TYPE char(1) NOT NULL,
+     TC_WRITEID bigint
+ );
+ 
+ ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 nvarchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT nvarchar(255) NULL,
+   PRIMARY KEY CLUSTERED
+ (
+     MT_KEY1 ASC,
+     MT_KEY2 ASC
+ )
+ );
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID BIGINT,
+   CHILD_INTEGER_IDX INT,
+   CHILD_TBL_ID BIGINT,
+   PARENT_CD_ID BIGINT,
+   PARENT_INTEGER_IDX INT NOT NULL,
+   PARENT_TBL_ID BIGINT NOT NULL,
+   POSITION INT NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE SMALLINT NOT NULL,
+   UPDATE_RULE SMALLINT,
+   DELETE_RULE SMALLINT,
+   ENABLE_VALIDATE_RELY SMALLINT NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE nvarchar(128) NOT NULL,
+   WS_TABLE nvarchar(128) NOT NULL,
+   WS_PARTITION nvarchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE METASTORE_DB_PROPERTIES (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE nvarchar(128) NOT NULL,
+   T2W_TABLE nvarchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE nvarchar(128) NOT NULL,
+   NWI_TABLE nvarchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+ PRIMARY KEY CLUSTERED
+ (
+     MHL_TXNID ASC
+ )
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME nvarchar(128) NOT NULL,
+   MRL_TBL_NAME nvarchar(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+ PRIMARY KEY CLUSTERED
+ (
+     MRL_TXN_ID ASC
+ )
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" int not null,
+   "NAME" nvarchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" int not null,
+   "VALIDATION_LEVEL" int not null,
+   "CAN_EVOLVE" bit not null,
+   "SCHEMA_GROUP" nvarchar(256),
+   "DESCRIPTION" nvarchar(4000),
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" int not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"),
+   "STATE" int not null,
+   "DESCRIPTION" nvarchar(4000),
+   "SCHEMA_TEXT" varchar(max),
+   "FINGERPRINT" nvarchar(256),
+   "SCHEMA_VERSION_NAME" nvarchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"),
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY nvarchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE REPL_TXN_MAP ADD CONSTRAINT REPL_TXN_MAP_PK PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID);
+ 
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME nvarchar(256) NOT NULL,
+    NEXT_VAL bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
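
Because some SchemaTool versions skip SEQUENCE_TABLE (see the NUCRDBMS-416 note above), a defensive variant of the seed insert can be handy when patching an existing database by hand. This is only an illustrative sketch, not part of the generated script:

-- Hypothetical manual repair: seed the DataNucleus sequence row only if it is missing.
IF NOT EXISTS (SELECT 1 FROM SEQUENCE_TABLE
               WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog')
  INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL)
  VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);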
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD varbinary(max)
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE nvarchar(128) NOT NULL,
+   WNL_TABLE nvarchar(128) NOT NULL,
+   WNL_PARTITION nvarchar(1024) NOT NULL,
+   WNL_TABLE_OBJ text NOT NULL,
+   WNL_PARTITION_OBJ text,
+   WNL_FILES text,
+   WNL_EVENT_TIME int NOT NULL
+ );
+ 
+ ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
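
The row inserted here is what schema verification checks against and what the 3.1.0-to-4.0.0 upgrade script in the next hunk updates in place. A quick illustrative check after running the init script (not part of the generated file):

-- Should return a single row: VER_ID = 1, SCHEMA_VERSION = '4.0.0'.
SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;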

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 0000000,27b7026..acc9361
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 
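
Since the HIVE-19416 change only adds nullable WRITE_ID columns, one way to confirm the upgrade took effect on an MSSQL metastore is to look the columns up in the catalog views. Illustrative only, not part of the upgrade script:

-- Expect two rows, one for TBLS and one for PARTITIONS.
SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE
FROM INFORMATION_SCHEMA.COLUMNS
WHERE COLUMN_NAME = 'WRITE_ID' AND TABLE_NAME IN ('TBLS', 'PARTITIONS');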


[05/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
index 0000000,cb32236..75ab80b
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
@@@ -1,0 -1,3103 +1,3102 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.lang.reflect.Field;
+ import java.io.IOException;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.PreparedStatement;
+ import java.sql.SQLException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.TimeUnit;
+ 
+ import com.google.common.collect.Sets;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.datanucleus.api.jdo.JDOPersistenceManager;
+ import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.permission.FsPermission;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.DoubleColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.FunctionType;
+ import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.ResourceType;
+ import org.apache.hadoop.hive.metastore.api.ResourceUri;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.StringColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TException;
+ import org.junit.Test;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotNull;
+ import static org.junit.Assert.assertNull;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ 
+ public abstract class TestHiveMetaStore {
+   private static final Logger LOG = LoggerFactory.getLogger(TestHiveMetaStore.class);
+   protected static HiveMetaStoreClient client;
+   protected static Configuration conf = MetastoreConf.newMetastoreConf();
+   protected static Warehouse warehouse;
+   protected static boolean isThriftClient = false;
+ 
+   private static final String TEST_DB1_NAME = "testdb1";
+   private static final String TEST_DB2_NAME = "testdb2";
+ 
+   private static final int DEFAULT_LIMIT_PARTITION_REQUEST = 100;
+ 
+   protected abstract HiveMetaStoreClient createClient() throws Exception;
+ 
+   @Before
+   public void setUp() throws Exception {
+     warehouse = new Warehouse(conf);
+ 
+     // set some values to use for getting conf. vars
+     MetastoreConf.setBoolVar(conf, ConfVars.METRICS_ENABLED, true);
+     conf.set("hive.key1", "value1");
+     conf.set("hive.key2", "http://www.example.com");
+     conf.set("hive.key3", "");
+     conf.set("hive.key4", "0");
+     conf.set("datanucleus.autoCreateTables", "false");
+ 
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     MetastoreConf.setLongVar(conf, ConfVars.BATCH_RETRIEVE_MAX, 2);
+     MetastoreConf.setLongVar(conf, ConfVars.LIMIT_PARTITION_REQUEST, DEFAULT_LIMIT_PARTITION_REQUEST);
+     MetastoreConf.setVar(conf, ConfVars.STORAGE_SCHEMA_READER_IMPL, "no.such.class");
+   }
+ 
+   @Test
+   public void testNameMethods() {
+     Map<String, String> spec = new LinkedHashMap<>();
+     spec.put("ds", "2008-07-01 14:13:12");
+     spec.put("hr", "14");
+     List<String> vals = new ArrayList<>();
+     vals.addAll(spec.values());
+     String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
+ 
+     try {
+       List<String> testVals = client.partitionNameToVals(partName);
+       assertTrue("Values from name are incorrect", vals.equals(testVals));
+ 
+       Map<String, String> testSpec = client.partitionNameToSpec(partName);
+       assertTrue("Spec from name is incorrect", spec.equals(testSpec));
+ 
+       List<String> emptyVals = client.partitionNameToVals("");
+       assertTrue("Values should be empty", emptyVals.size() == 0);
+ 
+       Map<String, String> emptySpec =  client.partitionNameToSpec("");
+       assertTrue("Spec should be empty", emptySpec.size() == 0);
+     } catch (Exception e) {
+       fail();
+     }
+   }
+ 
+   /**
+    * Tests creating a table and a partition, then tries to drop the table without
+    * dropping the partition.
+    *
+    */
+   @Test
+   public void testPartition() throws Exception {
+     partitionTester(client, conf);
+   }
+ 
+   private static void partitionTester(HiveMetaStoreClient client, Configuration conf)
+     throws Exception {
+     try {
+       String dbName = "compdb";
+       String tblName = "comptbl";
+       String typeName = "Person";
+       List<String> vals = makeVals("2008-07-01 14:13:12", "14");
+       List<String> vals2 = makeVals("2008-07-01 14:13:12", "15");
+       List<String> vals3 = makeVals("2008-07-02 14:13:12", "15");
+       List<String> vals4 = makeVals("2008-07-03 14:13:12", "151");
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+       Database db = client.getDatabase(dbName);
+       Path dbPath = new Path(db.getLocationUri());
+       FileSystem fs = FileSystem.get(dbPath.toUri(), conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       List<String> skewedColValue = Collections.singletonList("1");
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addTableParam("test_param_1", "Use this for comments etc")
+           .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+           .addSkewedColName("name")
+           .setSkewedColValues(Collections.singletonList(skewedColValue))
+           .setSkewedColValueLocationMaps(Collections.singletonMap(skewedColValue, "location1"))
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.STRING_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = makePartitionObject(dbName, tblName, vals, tbl, "/part1");
+       Partition part2 = makePartitionObject(dbName, tblName, vals2, tbl, "/part2");
+       Partition part3 = makePartitionObject(dbName, tblName, vals3, tbl, "/part3");
+       Partition part4 = makePartitionObject(dbName, tblName, vals4, tbl, "/part4");
+ 
+       // check if the partition exists (it shouldn't)
+       boolean exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("getPartition() should have thrown NoSuchObjectException", exceptionThrown);
+       Partition retp = client.add_partition(part);
+       assertNotNull("Unable to create partition " + part, retp);
+       Partition retp2 = client.add_partition(part2);
+       assertNotNull("Unable to create partition " + part2, retp2);
+       Partition retp3 = client.add_partition(part3);
+       assertNotNull("Unable to create partition " + part3, retp3);
+       Partition retp4 = client.add_partition(part4);
+       assertNotNull("Unable to create partition " + part4, retp4);
+ 
+       Partition part_get = client.getPartition(dbName, tblName, part.getValues());
 -      if(isThriftClient) {
 -        // since we are using thrift, 'part' will not have the create time and
 -        // last DDL time set since it does not get updated in the add_partition()
 -        // call - likewise part2 and part3 - set it correctly so that equals check
 -        // doesn't fail
 -        adjust(client, part, dbName, tblName);
 -        adjust(client, part2, dbName, tblName);
 -        adjust(client, part3, dbName, tblName);
 -      }
++      // since we are using thrift, 'part' will not have the create time and
++      // last DDL time set since it does not get updated in the add_partition()
++      // call - likewise part2 and part3 - set it correctly so that equals check
++      // doesn't fail
++      adjust(client, part, dbName, tblName, isThriftClient);
++      adjust(client, part2, dbName, tblName, isThriftClient);
++      adjust(client, part3, dbName, tblName, isThriftClient);
+       assertTrue("Partitions are not same", part.equals(part_get));
+ 
+       // check null cols schemas for a partition
+       List<String> vals6 = makeVals("2016-02-22 00:00:00", "16");
+       Partition part6 = makePartitionObject(dbName, tblName, vals6, tbl, "/part5");
+       part6.getSd().setCols(null);
+       LOG.info("Creating partition will null field schema");
+       client.add_partition(part6);
+       LOG.info("Listing all partitions for table " + dbName + "." + tblName);
+       final List<Partition> partitions = client.listPartitions(dbName, tblName, (short) -1);
+       boolean foundPart = false;
+       for (Partition p : partitions) {
+         if (p.getValues().equals(vals6)) {
+           assertNull(p.getSd().getCols());
+           LOG.info("Found partition " + p + " having null field schema");
+           foundPart = true;
+         }
+       }
+       assertTrue(foundPart);
+ 
+       String partName = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=14";
+       String part2Name = "ds=" + FileUtils.escapePathName("2008-07-01 14:13:12") + "/hr=15";
+       String part3Name = "ds=" + FileUtils.escapePathName("2008-07-02 14:13:12") + "/hr=15";
+       String part4Name = "ds=" + FileUtils.escapePathName("2008-07-03 14:13:12") + "/hr=151";
+ 
+       part_get = client.getPartition(dbName, tblName, partName);
+       assertTrue("Partitions are not the same", part.equals(part_get));
+ 
+       // Test partition listing with a partial spec - ds is specified but hr is not
+       List<String> partialVals = new ArrayList<>();
+       partialVals.add(vals.get(0));
+       Set<Partition> parts = new HashSet<>();
+       parts.add(part);
+       parts.add(part2);
+ 
+       List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
+           (short) -1);
+       assertTrue("Should have returned 2 partitions", partial.size() == 2);
+       assertTrue("Not all parts returned", partial.containsAll(parts));
+ 
+       Set<String> partNames = new HashSet<>();
+       partNames.add(partName);
+       partNames.add(part2Name);
+       List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+           (short) -1);
+       assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       partNames.add(part3Name);
+       partNames.add(part4Name);
+       partialVals.clear();
+       partialVals.add("");
+       partialNames = client.listPartitionNames(dbName, tblName, partialVals, (short) -1);
+       assertTrue("Should have returned 5 partition names", partialNames.size() == 5);
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       // Test partition listing with a partial spec - hr is specified but ds is not
+       parts.clear();
+       parts.add(part2);
+       parts.add(part3);
+ 
+       partialVals.clear();
+       partialVals.add("");
+       partialVals.add(vals2.get(1));
+ 
+       partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
+       assertEquals("Should have returned 2 partitions", 2, partial.size());
+       assertTrue("Not all parts returned", partial.containsAll(parts));
+ 
+       partNames.clear();
+       partNames.add(part2Name);
+       partNames.add(part3Name);
+       partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+           (short) -1);
+       assertEquals("Should have returned 2 partition names", 2, partialNames.size());
+       assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+ 
+       // Verify escaped partition names don't return partitions
+       exceptionThrown = false;
+       try {
+         String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
+         client.getPartition(dbName, tblName, badPartName);
+       } catch(NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
+ 
+       Path partPath = new Path(part.getSd().getLocation());
+ 
+ 
+       assertTrue(fs.exists(partPath));
+       client.dropPartition(dbName, tblName, part.getValues(), true);
+       assertFalse(fs.exists(partPath));
+ 
+       // Test append_partition_by_name
+       client.appendPartition(dbName, tblName, partName);
+       Partition part5 = client.getPartition(dbName, tblName, part.getValues());
+       assertTrue("Append partition by name failed", part5.getValues().equals(vals));
+       Path part5Path = new Path(part5.getSd().getLocation());
+       assertTrue(fs.exists(part5Path));
+ 
+       // Test drop_partition_by_name
+       assertTrue("Drop partition by name failed",
+           client.dropPartition(dbName, tblName, partName, true));
+       assertFalse(fs.exists(part5Path));
+ 
+       // add the partition again so that drop table with a partition can be
+       // tested
+       retp = client.add_partition(part);
+       assertNotNull("Unable to create partition " + part, retp);
+ 
+       // test add_partitions
+ 
+       List<String> mvals1 = makeVals("2008-07-04 14:13:12", "14641");
+       List<String> mvals2 = makeVals("2008-07-04 14:13:12", "14642");
+       List<String> mvals3 = makeVals("2008-07-04 14:13:12", "14643");
+       List<String> mvals4 = makeVals("2008-07-04 14:13:12", "14643"); // equal to 3
+       List<String> mvals5 = makeVals("2008-07-04 14:13:12", "14645");
+ 
+       Exception savedException;
+ 
+       // add_partitions(empty list) : ok, normal operation
+       client.add_partitions(new ArrayList<>());
+ 
+       // add_partitions(1,2,3) : ok, normal operation
+       Partition mpart1 = makePartitionObject(dbName, tblName, mvals1, tbl, "/mpart1");
+       Partition mpart2 = makePartitionObject(dbName, tblName, mvals2, tbl, "/mpart2");
+       Partition mpart3 = makePartitionObject(dbName, tblName, mvals3, tbl, "/mpart3");
+       client.add_partitions(Arrays.asList(mpart1,mpart2,mpart3));
+ 
 -      if(isThriftClient) {
 -        // do DDL time munging if thrift mode
 -        adjust(client, mpart1, dbName, tblName);
 -        adjust(client, mpart2, dbName, tblName);
 -        adjust(client, mpart3, dbName, tblName);
 -      }
++      // do DDL time munging if thrift mode
++      adjust(client, mpart1, dbName, tblName, isThriftClient);
++      adjust(client, mpart2, dbName, tblName, isThriftClient);
++      adjust(client, mpart3, dbName, tblName, isThriftClient);
+       verifyPartitionsPublished(client, dbName, tblName,
+           Arrays.asList(mvals1.get(0)),
+           Arrays.asList(mpart1,mpart2,mpart3));
+ 
+       Partition mpart4 = makePartitionObject(dbName, tblName, mvals4, tbl, "/mpart4");
+       Partition mpart5 = makePartitionObject(dbName, tblName, mvals5, tbl, "/mpart5");
+ 
+       // create dir for /mpart5
+       Path mp5Path = new Path(mpart5.getSd().getLocation());
+       warehouse.mkdirs(mp5Path);
+       assertTrue(fs.exists(mp5Path));
+ 
+       // add_partitions(5,4) : err = duplicate keyvals on mpart4
+       savedException = null;
+       try {
+         client.add_partitions(Arrays.asList(mpart5,mpart4));
+       } catch (Exception e) {
+         savedException = e;
+       } finally {
+         assertNotNull(savedException);
+       }
+ 
+       // check that /mpart4 does not exist, but /mpart5 still does.
+       assertTrue(fs.exists(mp5Path));
+       assertFalse(fs.exists(new Path(mpart4.getSd().getLocation())));
+ 
+       // add_partitions(5) : ok
+       client.add_partitions(Arrays.asList(mpart5));
+ 
 -      if(isThriftClient) {
 -        // do DDL time munging if thrift mode
 -        adjust(client, mpart5, dbName, tblName);
 -      }
++      // do DDL time munging if thrift mode
++      adjust(client, mpart5, dbName, tblName, isThriftClient);
+ 
+       verifyPartitionsPublished(client, dbName, tblName,
+           Arrays.asList(mvals1.get(0)),
+           Arrays.asList(mpart1,mpart2,mpart3,mpart5));
+ 
+       //// end add_partitions tests
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropType(typeName);
+ 
+       // recreate table as external, drop partition and it should
+       // still exist
+       tbl.setParameters(new HashMap<>());
+       tbl.getParameters().put("EXTERNAL", "TRUE");
+       client.createTable(tbl);
+       retp = client.add_partition(part);
+       assertTrue(fs.exists(partPath));
+       client.dropPartition(dbName, tblName, part.getValues(), true);
+       assertTrue(fs.exists(partPath));
+ 
+       for (String tableName : client.getTables(dbName, "*")) {
+         client.dropTable(dbName, tableName);
+       }
+ 
+       client.dropDatabase(dbName);
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testPartition() failed.");
+       throw e;
+     }
+   }
+ 
+   private static void verifyPartitionsPublished(HiveMetaStoreClient client,
+       String dbName, String tblName, List<String> partialSpec,
+       List<Partition> expectedPartitions) throws TException {
+     // Test partition listing with a partial spec
+ 
+     List<Partition> mpartial = client.listPartitions(dbName, tblName, partialSpec,
+         (short) -1);
+     assertEquals("Should have returned "+expectedPartitions.size()+
+         " partitions, returned " + mpartial.size(),
+         expectedPartitions.size(), mpartial.size());
+     assertTrue("Not all parts returned", mpartial.containsAll(expectedPartitions));
+   }
+ 
+   private static List<String> makeVals(String ds, String id) {
+     List<String> vals4 = new ArrayList<>(2);
+     vals4.add(ds);
+     vals4.add(id);
+     return vals4;
+   }
+ 
+   private static Partition makePartitionObject(String dbName, String tblName,
+       List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
+     Partition part4 = new Partition();
+     part4.setDbName(dbName);
+     part4.setTableName(tblName);
+     part4.setValues(ptnVals);
+     part4.setParameters(new HashMap<>());
+     part4.setSd(tbl.getSd().deepCopy());
+     part4.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
+     part4.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
+     MetaStoreUtils.updatePartitionStatsFast(part4, tbl, warehouse, false, false, null, true);
+     return part4;
+   }
+ 
+   @Test
+   public void testListPartitions() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<Partition> partitions = client.listPartitions(dbName, tblName, (short)-1);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions", values.size(), partitions.size());
+ 
+     partitions = client.listPartitions(dbName, tblName, (short)(values.size()/2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() / 2 +
+       " partitions",values.size() / 2, partitions.size());
+ 
+ 
+     partitions = client.listPartitions(dbName, tblName, (short) (values.size() * 2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions",values.size(), partitions.size());
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+   }
+ 
+   @Test
+   public void testListPartitionsWihtLimitEnabled() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     // Create too many partitions, just enough to validate over limit requests
+     List<List<String>> values = new ArrayList<>();
+     for (int i=0; i<DEFAULT_LIMIT_PARTITION_REQUEST + 1; i++) {
+       values.add(makeVals("2008-07-01 14:13:12", Integer.toString(i)));
+     }
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<Partition> partitions;
+     short maxParts;
+ 
+     // Requesting more partitions than allowed should throw an exception
+     try {
+       maxParts = -1;
+       partitions = client.listPartitions(dbName, tblName, maxParts);
+       fail("should have thrown MetaException about partition limit");
+     } catch (MetaException e) {
+       assertTrue(true);
+     }
+ 
+     // Requesting more partitions than allowed should throw an exception
+     try {
+       maxParts = DEFAULT_LIMIT_PARTITION_REQUEST + 1;
+       partitions = client.listPartitions(dbName, tblName, maxParts);
+       fail("should have thrown MetaException about partition limit");
+     } catch (MetaException e) {
+       assertTrue(true);
+     }
+ 
+     // Requesting less partitions than allowed should work
+     maxParts = DEFAULT_LIMIT_PARTITION_REQUEST / 2;
+     partitions = client.listPartitions(dbName, tblName, maxParts);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned 50 partitions", maxParts, partitions.size());
+   }
+ 
+   @Test
+   public void testAlterTableCascade() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+     Table tbl = client.getTable(dbName, tblName);
+     List<FieldSchema> cols = tbl.getSd().getCols();
+     cols.add(new FieldSchema("new_col", ColumnType.STRING_TYPE_NAME, ""));
+     tbl.getSd().setCols(cols);
+     //add new column with cascade option
+     client.alter_table(dbName, tblName, tbl, true);
+     //
+     Table tbl2 = client.getTable(dbName, tblName);
+     assertEquals("Unexpected number of cols", 3, tbl2.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col", tbl2.getSd().getCols().get(2).getName());
+     //get a partition
+     List<String> pvalues = new ArrayList<>(2);
+     pvalues.add("2008-07-01 14:13:12");
+     pvalues.add("14");
+     Partition partition = client.getPartition(dbName, tblName, pvalues);
+     assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col", partition.getSd().getCols().get(2).getName());
+ 
+     //add another column
+     cols = tbl.getSd().getCols();
+     cols.add(new FieldSchema("new_col2", ColumnType.STRING_TYPE_NAME, ""));
+     tbl.getSd().setCols(cols);
+     //add new column with no cascade option
+     client.alter_table(dbName, tblName, tbl, false);
+     tbl2 = client.getTable(dbName, tblName);
+     assertEquals("Unexpected number of cols", 4, tbl2.getSd().getCols().size());
+     assertEquals("Unexpected column name", "new_col2", tbl2.getSd().getCols().get(3).getName());
+     //get partition, this partition should not have the newly added column since cascade option
+     //was false
+     partition = client.getPartition(dbName, tblName, pvalues);
+     assertEquals("Unexpected number of cols", 3, partition.getSd().getCols().size());
+   }
+ 
+ 
+   @Test
+   public void testListPartitionNames() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+ 
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions", values.size(), partitions.size());
+ 
+     partitions = client.listPartitionNames(dbName, tblName, (short)(values.size()/2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() / 2 +
+       " partitions",values.size() / 2, partitions.size());
+ 
+ 
+     partitions = client.listPartitionNames(dbName, tblName, (short) (values.size() * 2));
+ 
+     assertNotNull("should have returned partitions", partitions);
+     assertEquals(" should have returned " + values.size() +
+       " partitions",values.size(), partitions.size());
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+   }
+ 
+ 
+   @Test
+   public void testDropTable() throws Throwable {
+     // create a table with multiple partitions
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     cleanUp(dbName, tblName, typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     client.dropTable(dbName, tblName);
+     client.dropType(typeName);
+ 
+     boolean exceptionThrown = false;
+     try {
+       client.getTable(dbName, tblName);
+     } catch(Exception e) {
+       assertEquals("table should not have existed",
+           NoSuchObjectException.class, e.getClass());
+       exceptionThrown = true;
+     }
+     assertTrue("Table " + tblName + " should have been dropped ", exceptionThrown);
+ 
+   }
+ 
+   @Test
+   public void testAlterViewParititon() throws Throwable {
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String viewName = "compView";
+ 
+     client.dropTable(dbName, tblName);
+     silentDropDatabase(dbName);
+     new DatabaseBuilder()
+         .setName(dbName)
+         .setDescription("Alter Partition Test database")
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("name", ColumnType.STRING_TYPE_NAME)
+         .addCol("income", ColumnType.INT_TYPE_NAME)
+         .create(client, conf);
+ 
+     if (isThriftClient) {
+       // the createTable() above does not update the location in the 'tbl'
+       // object when the client is a thrift client and the code below relies
+       // on the location being present in the 'tbl' object - so get the table
+       // from the metastore
+       tbl = client.getTable(dbName, tblName);
+     }
+ 
+     ArrayList<FieldSchema> viewCols = new ArrayList<>(1);
+     viewCols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ 
+     ArrayList<FieldSchema> viewPartitionCols = new ArrayList<>(1);
+     viewPartitionCols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+ 
+     Table view = new Table();
+     view.setDbName(dbName);
+     view.setTableName(viewName);
+     view.setTableType(TableType.VIRTUAL_VIEW.name());
+     view.setPartitionKeys(viewPartitionCols);
+     view.setViewOriginalText("SELECT income, name FROM " + tblName);
+     view.setViewExpandedText("SELECT `" + tblName + "`.`income`, `" + tblName +
+         "`.`name` FROM `" + dbName + "`.`" + tblName + "`");
+     view.setRewriteEnabled(false);
+     StorageDescriptor viewSd = new StorageDescriptor();
+     view.setSd(viewSd);
+     viewSd.setCols(viewCols);
+     viewSd.setCompressed(false);
+     viewSd.setParameters(new HashMap<>());
+     viewSd.setSerdeInfo(new SerDeInfo());
+     viewSd.getSerdeInfo().setParameters(new HashMap<>());
+ 
+     client.createTable(view);
+ 
+     if (isThriftClient) {
+       // the createTable() above does not update the location in the 'tbl'
+       // object when the client is a thrift client and the code below relies
+       // on the location being present in the 'tbl' object - so get the table
+       // from the metastore
+       view = client.getTable(dbName, viewName);
+     }
+ 
+     List<String> vals = new ArrayList<>(1);
+     vals.add("abc");
+ 
+     Partition part = new Partition();
+     part.setDbName(dbName);
+     part.setTableName(viewName);
+     part.setValues(vals);
+     part.setParameters(new HashMap<>());
+ 
+     client.add_partition(part);
+ 
+     Partition part2 = client.getPartition(dbName, viewName, part.getValues());
+ 
+     part2.getParameters().put("a", "b");
+ 
+     client.alter_partition(dbName, viewName, part2, null);
+ 
+     Partition part3 = client.getPartition(dbName, viewName, part.getValues());
+     assertEquals("couldn't view alter partition", part3.getParameters().get(
+         "a"), "b");
+ 
+     client.dropTable(dbName, viewName);
+ 
+     client.dropTable(dbName, tblName);
+ 
+     client.dropDatabase(dbName);
+   }
+ 
+   @Test
+   public void testAlterPartition() throws Throwable {
+ 
+     try {
+       String dbName = "compdb";
+       String tblName = "comptbl";
+       List<String> vals = new ArrayList<>(2);
+       vals.add("2008-07-01");
+       vals.add("14");
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setDescription("Alter Partition Test database")
+           .create(client, conf);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .addTableParam("test_param_1", "Use this for comments etc")
+           .addBucketCol("name")
+           .addSerdeParam(ColumnType.SERIALIZATION_FORMAT, "1")
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = new Partition();
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       part.setValues(vals);
+       part.setParameters(new HashMap<>());
+       part.setSd(tbl.getSd());
+       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+ 
+       client.add_partition(part);
+ 
+       Partition part2 = client.getPartition(dbName, tblName, part.getValues());
+ 
+       part2.getParameters().put("retention", "10");
+       part2.getSd().setNumBuckets(12);
+       part2.getSd().getSerdeInfo().getParameters().put("abc", "1");
+       client.alter_partition(dbName, tblName, part2, null);
+ 
+       Partition part3 = client.getPartition(dbName, tblName, part.getValues());
+       assertEquals("couldn't alter partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't alter partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't alter partition", part3.getSd().getNumBuckets(),
+           12);
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testPartition() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testRenamePartition() throws Throwable {
+ 
+     try {
+       String dbName = "compdb1";
+       String tblName = "comptbl1";
+       List<String> vals = new ArrayList<>(2);
+       vals.add("2011-07-11");
+       vals.add("8");
+       String part_path = "/ds=2011-07-11/hr=8";
+       List<String> tmp_vals = new ArrayList<>(2);
+       tmp_vals.add("tmp_2011-07-11");
+       tmp_vals.add("-8");
+       String part2_path = "/ds=tmp_2011-07-11/hr=-8";
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setDescription("Rename Partition Test database")
+           .create(client, conf);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .addPartCol("ds", ColumnType.STRING_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Partition part = new Partition();
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       part.setValues(vals);
+       part.setParameters(new HashMap<>());
+       part.setSd(tbl.getSd().deepCopy());
+       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
+       part.getParameters().put("retention", "10");
+       part.getSd().setNumBuckets(12);
+       part.getSd().getSerdeInfo().getParameters().put("abc", "1");
+ 
+       client.add_partition(part);
+ 
+       part.setValues(tmp_vals);
+       client.renamePartition(dbName, tblName, vals, part);
+ 
+       boolean exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+ 
+       Partition part3 = client.getPartition(dbName, tblName, tmp_vals);
+       assertEquals("couldn't rename partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+           12);
+       assertEquals("new partition sd matches", part3.getSd().getLocation(),
+           tbl.getSd().getLocation() + part2_path);
+ 
+       part.setValues(vals);
+       client.renamePartition(dbName, tblName, tmp_vals, part);
+ 
+       exceptionThrown = false;
+       try {
+         Partition p = client.getPartition(dbName, tblName, tmp_vals);
+       } catch(Exception e) {
+         assertEquals("partition should not have existed",
+             NoSuchObjectException.class, e.getClass());
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+ 
+       part3 = client.getPartition(dbName, tblName, vals);
+       assertEquals("couldn't rename partition", part3.getParameters().get(
+           "retention"), "10");
+       assertEquals("couldn't rename partition", part3.getSd().getSerdeInfo()
+           .getParameters().get("abc"), "1");
+       assertEquals("couldn't rename partition", part3.getSd().getNumBuckets(),
+           12);
+       assertEquals("new partition sd matches", part3.getSd().getLocation(),
+           tbl.getSd().getLocation() + part_path);
+ 
+       client.dropTable(dbName, tblName);
+ 
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testRenamePartition() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testDatabase() throws Throwable {
+     try {
+       // clear up any existing databases
+       silentDropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB2_NAME);
+ 
+       Database db = new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setOwnerName(SecurityUtils.getUser())
+           .build(conf);
+       Assert.assertEquals(SecurityUtils.getUser(), db.getOwnerName());
+       client.createDatabase(db);
+ 
+       db = client.getDatabase(TEST_DB1_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB1_NAME, db.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDatabasePath(db).toString(), db.getLocationUri());
+       assertEquals(db.getOwnerName(), SecurityUtils.getUser());
+       assertEquals(db.getOwnerType(), PrincipalType.USER);
+       assertEquals(Warehouse.DEFAULT_CATALOG_NAME, db.getCatalogName());
+       Database db2 = new DatabaseBuilder()
+           .setName(TEST_DB2_NAME)
+           .create(client, conf);
+ 
+       db2 = client.getDatabase(TEST_DB2_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB2_NAME, db2.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDatabasePath(db2).toString(), db2.getLocationUri());
+ 
+       List<String> dbs = client.getDatabases(".*");
+ 
+       assertTrue("first database is not " + TEST_DB1_NAME, dbs.contains(TEST_DB1_NAME));
+       assertTrue("second database is not " + TEST_DB2_NAME, dbs.contains(TEST_DB2_NAME));
+ 
+       client.dropDatabase(TEST_DB1_NAME);
+       client.dropDatabase(TEST_DB2_NAME);
+       silentDropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB2_NAME);
+     } catch (Throwable e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testDatabase() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testDatabaseLocationWithPermissionProblems() throws Exception {
+ 
+     // Note: The following test will fail if you are running this test as root. Setting
+     // permission to '0' on the database folder will not preclude root from being able
+     // to create the necessary files.
+ 
+     if (System.getProperty("user.name").equals("root")) {
+       System.err.println("Skipping test because you are running as root!");
+       return;
+     }
+ 
+     silentDropDatabase(TEST_DB1_NAME);
+ 
+     String dbLocation =
+       MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test/_testDB_create_";
+     FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+     fs.mkdirs(
+               new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+               new FsPermission((short) 0));
+     Database db = new DatabaseBuilder()
+         .setName(TEST_DB1_NAME)
+         .setLocation(dbLocation)
+         .build(conf);
+ 
+ 
+     boolean createFailed = false;
+     try {
+       client.createDatabase(db);
+     } catch (MetaException cantCreateDB) {
+       createFailed = true;
+     } finally {
+       // Cleanup
+       if (!createFailed) {
+         try {
+           client.dropDatabase(TEST_DB1_NAME);
+         } catch(Exception e) {
+           System.err.println("Failed to remove database in cleanup: " + e.getMessage());
+         }
+       }
+ 
+       fs.setPermission(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"),
+                        new FsPermission((short) 755));
+       fs.delete(new Path(MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/test"), true);
+     }
+ 
+     assertTrue("Database creation succeeded even with permission problem", createFailed);
+   }
+ 
+   @Test
+   public void testDatabaseLocation() throws Throwable {
+     try {
+       // clear up any existing databases
+       silentDropDatabase(TEST_DB1_NAME);
+ 
+       String dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_create_";
+       new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setLocation(dbLocation)
+           .create(client, conf);
+ 
+       Database db = client.getDatabase(TEST_DB1_NAME);
+ 
+       assertEquals("name of returned db is different from that of inserted db",
+           TEST_DB1_NAME, db.getName());
+       assertEquals("location of the returned db is different from that of inserted db",
+           warehouse.getDnsPath(new Path(dbLocation)).toString(), db.getLocationUri());
+ 
+       client.dropDatabase(TEST_DB1_NAME);
+       silentDropDatabase(TEST_DB1_NAME);
+ 
+       boolean objectNotExist = false;
+       try {
+         client.getDatabase(TEST_DB1_NAME);
+       } catch (NoSuchObjectException e) {
+         objectNotExist = true;
+       }
+       assertTrue("Database " + TEST_DB1_NAME + " exists ", objectNotExist);
+ 
+       dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "/_testDB_file_";
+       FileSystem fs = FileSystem.get(new Path(dbLocation).toUri(), conf);
+       fs.createNewFile(new Path(dbLocation));
+       fs.deleteOnExit(new Path(dbLocation));
+       db = new DatabaseBuilder()
+           .setName(TEST_DB1_NAME)
+           .setLocation(dbLocation)
+           .build(conf);
+ 
+       boolean createFailed = false;
+       try {
+         client.createDatabase(db);
+       } catch (MetaException cantCreateDB) {
+         System.err.println(cantCreateDB.getMessage());
+         createFailed = true;
+       }
+       assertTrue("Database creation succeeded even though the location exists and is a file",
+           createFailed);
+ 
+       objectNotExist = false;
+       try {
+         client.getDatabase(TEST_DB1_NAME);
+       } catch (NoSuchObjectException e) {
+         objectNotExist = true;
+       }
+       assertTrue("Database " + TEST_DB1_NAME + " exists when location is specified and is a file",
+           objectNotExist);
+ 
+     } catch (Throwable e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testDatabaseLocation() failed.");
+       throw e;
+     }
+   }
+ 
+ 
+   @Test
+   public void testSimpleTypeApi() throws Exception {
+     try {
+       client.dropType(ColumnType.INT_TYPE_NAME);
+ 
+       Type typ1 = new Type();
+       typ1.setName(ColumnType.INT_TYPE_NAME);
+       boolean ret = client.createType(typ1);
+       assertTrue("Unable to create type", ret);
+ 
+       Type typ1_2 = client.getType(ColumnType.INT_TYPE_NAME);
+       assertNotNull(typ1_2);
+       assertEquals(typ1.getName(), typ1_2.getName());
+ 
+       ret = client.dropType(ColumnType.INT_TYPE_NAME);
+       assertTrue("unable to drop type integer", ret);
+ 
+       boolean exceptionThrown = false;
+       try {
+         client.getType(ColumnType.INT_TYPE_NAME);
+       } catch (NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testSimpleTypeApi() failed.");
+       throw e;
+     }
+   }
+ 
+   // TODO:pc need to enhance this with complex fields and getType_all function
+   @Test
+   public void testComplexTypeApi() throws Exception {
+     try {
+       client.dropType("Person");
+ 
+       Type typ1 = new Type();
+       typ1.setName("Person");
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       boolean ret = client.createType(typ1);
+       assertTrue("Unable to create type", ret);
+ 
+       Type typ1_2 = client.getType("Person");
+       assertNotNull("type Person not found", typ1_2);
+       assertEquals(typ1.getName(), typ1_2.getName());
+       assertEquals(typ1.getFields().size(), typ1_2.getFields().size());
+       assertEquals(typ1.getFields().get(0), typ1_2.getFields().get(0));
+       assertEquals(typ1.getFields().get(1), typ1_2.getFields().get(1));
+ 
+       client.dropType("Family");
+ 
+       Type fam = new Type();
+       fam.setName("Family");
+       fam.setFields(new ArrayList<>(2));
+       fam.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       fam.getFields().add(
+           new FieldSchema("members",
+               ColumnType.getListType(typ1.getName()), ""));
+ 
+       ret = client.createType(fam);
+       assertTrue("Unable to create type " + fam.getName(), ret);
+ 
+       Type fam2 = client.getType("Family");
+       assertNotNull("type Family not found", fam2);
+       assertEquals(fam.getName(), fam2.getName());
+       assertEquals(fam.getFields().size(), fam2.getFields().size());
+       assertEquals(fam.getFields().get(0), fam2.getFields().get(0));
+       assertEquals(fam.getFields().get(1), fam2.getFields().get(1));
+ 
+       ret = client.dropType("Family");
+       assertTrue("unable to drop type Family", ret);
+ 
+       ret = client.dropType("Person");
+       assertTrue("unable to drop type Person", ret);
+ 
+       boolean exceptionThrown = false;
+       try {
+         client.getType("Person");
+       } catch (NoSuchObjectException e) {
+         exceptionThrown = true;
+       }
+       assertTrue("Expected NoSuchObjectException", exceptionThrown);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testComplexTypeApi() failed.");
+       throw e;
+     }
+   }
+ 
+   @Test
+   public void testSimpleTable() throws Exception {
+     try {
+       String dbName = "simpdb";
+       String tblName = "simptbl";
+       String tblName2 = "simptbl2";
+       String typeName = "Person";
+ 
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+ 
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addStorageDescriptorParam("test_param_1", "Use this for comments etc")
+           .create(client, conf);
+ 
+       if (isThriftClient) {
+         // the createTable() above does not update the location in the 'tbl'
+         // object when the client is a thrift client and the code below relies
+         // on the location being present in the 'tbl' object - so get the table
+         // from the metastore
+         tbl = client.getTable(dbName, tblName);
+       }
+ 
+       Table tbl2 = client.getTable(dbName, tblName);
+       assertNotNull(tbl2);
+       assertEquals(tbl2.getDbName(), dbName);
+       assertEquals(tbl2.getTableName(), tblName);
+       assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+       assertEquals(tbl2.getSd().isCompressed(), false);
+       assertEquals(tbl2.getSd().getNumBuckets(), 1);
+       assertEquals(tbl2.getSd().getLocation(), tbl.getSd().getLocation());
+       assertNotNull(tbl2.getSd().getSerdeInfo());
+       tbl.getSd().getSerdeInfo().setParameters(new HashMap<>());
+       tbl.getSd().getSerdeInfo().getParameters().put(ColumnType.SERIALIZATION_FORMAT, "1");
+ 
+       tbl2.setTableName(tblName2);
+       tbl2.setParameters(new HashMap<>());
+       tbl2.getParameters().put("EXTERNAL", "TRUE");
+       tbl2.getSd().setLocation(tbl.getSd().getLocation() + "-2");
+ 
+       List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+           + tbl.getPartitionKeys().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+ 
+       client.createTable(tbl2);
+       if (isThriftClient) {
+         tbl2 = client.getTable(tbl2.getDbName(), tbl2.getTableName());
+       }
+ 
+       Table tbl3 = client.getTable(dbName, tblName2);
+       assertNotNull(tbl3);
+       assertEquals(tbl3.getDbName(), dbName);
+       assertEquals(tbl3.getTableName(), tblName2);
+       assertEquals(tbl3.getSd().getCols().size(), typ1.getFields().size());
+       assertEquals(tbl3.getSd().isCompressed(), false);
+       assertEquals(tbl3.getSd().getNumBuckets(), 1);
+       assertEquals(tbl3.getSd().getLocation(), tbl2.getSd().getLocation());
+       assertEquals(tbl3.getParameters(), tbl2.getParameters());
+ 
+       fieldSchemas = client.getFields(dbName, tblName2);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl2.getSd().getCols().size());
+       for (FieldSchema fs : tbl2.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       fieldSchemasFull = client.getSchema(dbName, tblName2);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl2.getSd().getCols().size()
+           + tbl2.getPartitionKeys().size());
+       for (FieldSchema fs : tbl2.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl2.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+ 
+       assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+           .get("test_param_1"));
+       assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+       assertTrue("Partition key list is not empty",
+           (tbl2.getPartitionKeys() == null)
+               || (tbl2.getPartitionKeys().size() == 0));
+ 
+       //test get_table_objects_by_name functionality
+       ArrayList<String> tableNames = new ArrayList<>();
+       tableNames.add(tblName2);
+       tableNames.add(tblName);
+       tableNames.add(tblName2);
+       List<Table> foundTables = client.getTableObjectsByName(dbName, tableNames);
+ 
+       assertEquals(2, foundTables.size());
+       for (Table t: foundTables) {
+         if (t.getTableName().equals(tblName2)) {
+           assertEquals(t.getSd().getLocation(), tbl2.getSd().getLocation());
+         } else {
+           assertEquals(t.getTableName(), tblName);
+           assertEquals(t.getSd().getLocation(), tbl.getSd().getLocation());
+         }
+         assertEquals(t.getSd().getCols().size(), typ1.getFields().size());
+         assertEquals(t.getSd().isCompressed(), false);
+         assertEquals(t.getSd().getNumBuckets(), 1);
+         assertNotNull(t.getSd().getSerdeInfo());
+         assertEquals(t.getDbName(), dbName);
+       }
+ 
+       tableNames.add(1, "table_that_doesnt_exist");
+       foundTables = client.getTableObjectsByName(dbName, tableNames);
+       assertEquals(foundTables.size(), 2);
+ 
+       InvalidOperationException ioe = null;
+       try {
+         foundTables = client.getTableObjectsByName(dbName, null);
+       } catch (InvalidOperationException e) {
+         ioe = e;
+       }
+       assertNotNull(ioe);
+       assertTrue("Table not found", ioe.getMessage().contains("null tables"));
+ 
+       UnknownDBException udbe = null;
+       try {
+         foundTables = client.getTableObjectsByName("db_that_doesnt_exist", tableNames);
+       } catch (UnknownDBException e) {
+         udbe = e;
+       }
+       assertNotNull(udbe);
+       assertTrue("DB not found",
+           udbe.getMessage().contains("not find database hive.db_that_doesnt_exist"));
+ 
+       udbe = null;
+       try {
+         foundTables = client.getTableObjectsByName("", tableNames);
+       } catch (UnknownDBException e) {
+         udbe = e;
+       }
+       assertNotNull(udbe);
+       assertTrue("DB not found", udbe.getMessage().contains("is null or empty"));
+ 
+       FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+       client.dropTable(dbName, tblName);
+       assertFalse(fs.exists(new Path(tbl.getSd().getLocation())));
+ 
+       client.dropTable(dbName, tblName2);
+       assertTrue(fs.exists(new Path(tbl2.getSd().getLocation())));
+ 
+       client.dropType(typeName);
+       client.dropDatabase(dbName);
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testSimpleTable() failed.");
+       throw e;
+     }
+   }
+ 
+   // Tests that in the absence of stats for partitions, and/or absence of columns
+   // to get stats for, the metastore does not break. See HIVE-12083 for motivation.
+   @Test
+   public void testStatsFastTrivial() throws Throwable {
+     String dbName = "tstatsfast";
+     String tblName = "t1";
+     String tblOwner = "statstester";
+     String typeName = "Person";
+     int lastAccessed = 12083;
+ 
+     cleanUp(dbName,tblName,typeName);
+ 
+     List<List<String>> values = new ArrayList<>();
+     values.add(makeVals("2008-07-01 14:13:12", "14"));
+     values.add(makeVals("2008-07-01 14:13:12", "15"));
+     values.add(makeVals("2008-07-02 14:13:12", "15"));
+     values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+     createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+     List<String> emptyColNames = new ArrayList<>();
+     List<String> emptyPartNames = new ArrayList<>();
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add("name");
+     colNames.add("income");
+     List<String> partNames = client.listPartitionNames(dbName,tblName,(short)-1);
+ 
+     assertEquals(0,emptyColNames.size());
+     assertEquals(0,emptyPartNames.size());
+     assertEquals(2,colNames.size());
+     assertEquals(4,partNames.size());
+ 
+     // Test for both colNames and partNames being empty:
+     AggrStats aggrStatsEmpty = client.getAggrColStatsFor(dbName,tblName,emptyColNames,emptyPartNames);
+     assertNotNull(aggrStatsEmpty); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsEmpty.getPartsFound());
+     assertNotNull(aggrStatsEmpty.getColStats());
+     assertTrue(aggrStatsEmpty.getColStats().isEmpty());
+ 
+     // Test for only colNames being empty
+     AggrStats aggrStatsOnlyParts = client.getAggrColStatsFor(dbName,tblName,emptyColNames,partNames);
+     assertNotNull(aggrStatsOnlyParts); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsOnlyParts.getPartsFound());
+     assertNotNull(aggrStatsOnlyParts.getColStats());
+     assertTrue(aggrStatsOnlyParts.getColStats().isEmpty());
+ 
+     // Test for only partNames being empty
+     AggrStats aggrStatsOnlyCols = client.getAggrColStatsFor(dbName,tblName,colNames,emptyPartNames);
+     assertNotNull(aggrStatsOnlyCols); // short-circuited on client-side, verifying that it's an empty object, not null
+     assertEquals(0,aggrStatsOnlyCols.getPartsFound());
+     assertNotNull(aggrStatsOnlyCols.getColStats());
+     assertTrue(aggrStatsOnlyCols.getColStats().isEmpty());
+ 
+     // Test for valid values for both.
+     AggrStats aggrStatsFull = client.getAggrColStatsFor(dbName,tblName,colNames,partNames);
+     assertNotNull(aggrStatsFull);
+     assertEquals(0,aggrStatsFull.getPartsFound()); // would still be empty, because no stats are actually populated.
+     assertNotNull(aggrStatsFull.getColStats());
+     assertTrue(aggrStatsFull.getColStats().isEmpty());
+ 
+   }
+ 
+   @Test
+   public void testColumnStatistics() throws Throwable {
+ 
+     String dbName = "columnstatstestdb";
+     String tblName = "tbl";
+     String typeName = "Person";
+     String tblOwner = "testowner";
+     int lastAccessed = 6796;
+ 
+     try {
+       cleanUp(dbName, tblName, typeName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+       createTableForTestFilter(dbName,tblName, tblOwner, lastAccessed, true);
+ 
+       // Create a ColumnStatistics Obj
+       String[] colName = new String[]{"income", "name"};
+       double lowValue = 50000.21;
+       double highValue = 1200000.4525;
+       long numNulls = 3;
+       long numDVs = 22;
+       double avgColLen = 50.30;
+       long maxColLen = 102;
+       String[] colType = new String[] {"double", "string"};
+       boolean isTblLevel = true;
+       String partName = null;
+       List<ColumnStatisticsObj> statsObjs = new ArrayList<>();
+ 
+       ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc();
+       statsDesc.setDbName(dbName);
+       statsDesc.setTableName(tblName);
+       statsDesc.setIsTblLevel(isTblLevel);
+       statsDesc.setPartName(partName);
+ 
+       ColumnStatisticsObj statsObj = new ColumnStatisticsObj();
+       statsObj.setColName(colName[0]);
+       statsObj.setColType(colType[0]);
+ 
+       ColumnStatisticsData statsData = new ColumnStatisticsData();
+       DoubleColumnStatsData numericStats = new DoubleColumnStatsData();
+       statsData.setDoubleStats(numericStats);
+ 
+       statsData.getDoubleStats().setHighValue(highValue);
+       statsData.getDoubleStats().setLowValue(lowValue);
+       statsData.getDoubleStats().setNumDVs(numDVs);
+       statsData.getDoubleStats().setNumNulls(numNulls);
+ 
+       statsObj.setStatsData(statsData);
+       statsObjs.add(statsObj);
+ 
+       statsObj = new ColumnStatisticsObj();
+       statsObj.setColName(colName[1]);
+       statsObj.setColType(colType[1]);
+ 
+       statsData = new ColumnStatisticsData();
+       StringColumnStatsData stringStats = new StringColumnStatsData();
+       statsData.setStringStats(stringStats);
+       statsData.getStringStats().setAvgColLen(avgColLen);
+       statsData.getStringStats().setMaxColLen(maxColLen);
+       statsData.getStringStats().setNumDVs(numDVs);
+       statsData.getStringStats().setNumNulls(numNulls);
+ 
+       statsObj.setStatsData(statsData);
+       statsObjs.add(statsObj);
+ 
+       ColumnStatistics colStats = new ColumnStatistics();
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
+       // write stats objs persistently
+       client.updateTableColumnStatistics(colStats);
+ 
+       // retrieve the stats obj that was just written
+       ColumnStatisticsObj colStats2 = client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+ 
+      // compare stats obj to ensure what we get is what we wrote
+       assertNotNull(colStats2);
+       assertEquals(colStats2.getColName(), colName[0]);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getLowValue(), lowValue, 0.01);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getHighValue(), highValue, 0.01);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getNumNulls(), numNulls);
+       assertEquals(colStats2.getStatsData().getDoubleStats().getNumDVs(), numDVs);
+ 
+       // test delete column stats; if no column name is passed, all column stats associated with
+       // the table are deleted
+       boolean status = client.deleteTableColumnStatistics(dbName, tblName, null);
+       assertTrue(status);
+       // try to query stats for a column whose stats were just deleted
+       assertTrue(client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[1])).isEmpty());
+ 
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
+       // update table level column stats
+       client.updateTableColumnStatistics(colStats);
+ 
+       // query column stats for column whose stats were updated in the previous call
+       colStats2 = client.getTableColumnStatistics(
+           dbName, tblName, Lists.newArrayList(colName[0])).get(0);
+ 
+       // partition level column statistics test
+       // create a table with multiple partitions
+       cleanUp(dbName, tblName, typeName);
+ 
+       List<List<String>> values = new ArrayList<>();
+       values.add(makeVals("2008-07-01 14:13:12", "14"));
+       values.add(makeVals("2008-07-01 14:13:12", "15"));
+       values.add(makeVals("2008-07-02 14:13:12", "15"));
+       values.add(makeVals("2008-07-03 14:13:12", "151"));
+ 
+       createMultiPartitionTableSchema(dbName, tblName, typeName, values);
+ 
+       List<String> partitions = client.listPartitionNames(dbName, tblName, (short)-1);
+ 
+       partName = partitions.get(0);
+       isTblLevel = false;
+ 
+       // create a new columnstatistics desc to represent partition level column stats
+       statsDesc = new ColumnStatisticsDesc();
+       statsDesc.setDbName(dbName);
+       statsDesc.setTableName(tblName);
+       statsDesc.setPartName(partName);
+       statsDesc.setIsTblLevel(isTblLevel);
+ 
+       colStats = new ColumnStatistics();
+       colStats.setStatsDesc(statsDesc);
+       colStats.setStatsObj(statsObjs);
+ 
+      client.updatePartitionColumnStatistics(colStats);
+ 
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[1])).get(partName).get(0);
+ 
+      // compare stats obj to ensure what we get is what we wrote
+      assertNotNull(colStats2);
+      assertEquals(colStats.getStatsDesc().getPartName(), partName);
+      assertEquals(colStats2.getColName(), colName[1]);
+      assertEquals(colStats2.getStatsData().getStringStats().getMaxColLen(), maxColLen);
+      assertEquals(colStats2.getStatsData().getStringStats().getAvgColLen(), avgColLen, 0.01);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumNulls(), numNulls);
+      assertEquals(colStats2.getStatsData().getStringStats().getNumDVs(), numDVs);
+ 
+      // test stats deletion at partition level
+      client.deletePartitionColumnStatistics(dbName, tblName, partName, colName[1]);
+ 
+      colStats2 = client.getPartitionColumnStatistics(dbName, tblName,
+          Lists.newArrayList(partName), Lists.newArrayList(colName[0])).get(partName).get(0);
+ 
+      // test get stats on a column for which stats don't exist
+      assertTrue(client.getPartitionColumnStatistics(dbName, tblName,
+            Lists.newArrayList(partName), Lists.newArrayList(colName[1])).isEmpty());
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testColumnStatistics() failed.");
+       throw e;
+     } finally {
+       cleanUp(dbName, tblName, typeName);
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testGetSchemaWithNoClassDefFoundError() throws TException {
+     String dbName = "testDb";
+     String tblName = "testTable";
+ 
+     client.dropTable(dbName, tblName);
+     silentDropDatabase(dbName);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("name", ColumnType.STRING_TYPE_NAME, "")
+         .setSerdeLib("no.such.class")
+         .create(client, conf);
+ 
+     client.getSchema(dbName, tblName);
+   }
+ 
+   @Test
+   public void testAlterTable() throws Exception {
+     String dbName = "alterdb";
+     String invTblName = "alter-tbl";
+     String tblName = "altertbl";
+ 
+     try {
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+ 
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       ArrayList<FieldSchema> invCols = new ArrayList<>(2);
+       invCols.add(new FieldSchema("n-ame", ColumnType.STRING_TYPE_NAME, ""));
+       invCols.add(new FieldSchema("in.come", ColumnType.INT_TYPE_NAME, ""));
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(invTblName)
+           .setCols(invCols)
+           .build(conf);
+ 
+       boolean failed = false;
+       try {
+         client.createTable(tbl);
+       } catch (InvalidObjectException ex) {
+         failed = true;
+       }
+       if (!failed) {
+         fail("Able to create table with invalid name: " + invTblName);
+       }
+ 
+       // create an invalid table which has wrong column type
+       ArrayList<FieldSchema> invColsInvType = new ArrayList<>(2);
+       invColsInvType.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       invColsInvType.add(new FieldSchema("income", "xyz", ""));
+       tbl.setTableName(tblName);
+       tbl.getSd().setCols(invColsInvType);
+       boolean failChecker = false;
+       try {
+         client.createTable(tbl);
+       } catch (InvalidObjectException ex) {
+         failChecker = true;
+       }
+       if (!failChecker) {
+         fail("Able to create table with invalid column type: " + invTblName);
+       }
+ 
+       ArrayList<FieldSchema> cols = new ArrayList<>(2);
+       cols.add(new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       cols.add(new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+ 
+       // create a valid table
+       tbl.setTableName(tblName);
+       tbl.getSd().setCols(cols);
+       client.createTable(tbl);
+ 
+       if (isThriftClient) {
+         tbl = client.getTable(tbl.getDbName(), tbl.getTableName());
+       }
+ 
+       // now try an invalid alter table
+       Table tbl2 = client.getTable(dbName, tblName);
+       failed = false;
+       try {
+         tbl2.setTableName(invTblName);
+         tbl2.getSd().setCols(invCols);
+         client.alter_table(dbName, tblName, tbl2);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       if (!failed) {
+         fail("Able to rename table with invalid name: " + invTblName);
+       }
+ 
+       // try an invalid alter table that changes a partition key name
+       Table tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+       List<FieldSchema> partitionKeys = tbl_pk.getPartitionKeys();
+       for (FieldSchema fs : partitionKeys) {
+         fs.setName("invalid_to_change_name");
+         fs.setComment("can_change_comment");
+       }
+       tbl_pk.setPartitionKeys(partitionKeys);
+       try {
+         client.alter_table(dbName, tblName, tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertTrue("Should not have succeeded in altering partition key name", failed);
+ 
+       // try a valid alter table that changes only a partition key comment
+       failed = false;
+       tbl_pk = client.getTable(tbl.getDbName(), tbl.getTableName());
+       partitionKeys = tbl_pk.getPartitionKeys();
+       for (FieldSchema fs : partitionKeys) {
+         fs.setComment("can_change_comment");
+       }
+       tbl_pk.setPartitionKeys(partitionKeys);
+       try {
+         client.alter_table(dbName, tblName, tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertFalse("Should not have failed alter table partition comment", failed);
+       Table newT = client.getTable(tbl.getDbName(), tbl.getTableName());
+       assertEquals(partitionKeys, newT.getPartitionKeys());
+ 
+       // try a valid alter table
+       tbl2.setTableName(tblName + "_renamed");
+       tbl2.getSd().setCols(cols);
+       tbl2.getSd().setNumBuckets(32);
+       client.alter_table(dbName, tblName, tbl2);
+       Table tbl3 = client.getTable(dbName, tbl2.getTableName());
+       assertEquals("Alter table didn't succeed. Num buckets is different ",
+           tbl2.getSd().getNumBuckets(), tbl3.getSd().getNumBuckets());
+       // check that data has moved
+       FileSystem fs = FileSystem.get((new Path(tbl.getSd().getLocation())).toUri(), conf);
+       assertFalse("old table location still exists", fs.exists(new Path(tbl
+           .getSd().getLocation())));
+       assertTrue("data did not move to new location", fs.exists(new Path(tbl3
+           .getSd().getLocation())));
+ 
+       if (!isThriftClient) {
+         assertEquals("alter table didn't move data correct location", tbl3
+             .getSd().getLocation(), tbl2.getSd().getLocation());
+       }
+ 
+       // alter table with invalid column type
+       tbl_pk.getSd().setCols(invColsInvType);
+       failed = false;
+       try {
+         client.alter_table(dbName, tbl2.getTableName(), tbl_pk);
+       } catch (InvalidOperationException ex) {
+         failed = true;
+       }
+       assertTrue("Should not have succeeded in altering column", failed);
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testAlterTable() failed.");
+       throw e;
+     } finally {
+       silentDropDatabase(dbName);
+     }
+   }
+ 
+   @Test
+   public void testComplexTable() throws Exception {
+ 
+     String dbName = "compdb";
+     String tblName = "comptbl";
+     String typeName = "Person";
+ 
+     try {
+       client.dropTable(dbName, tblName);
+       silentDropDatabase(dbName);
+       new DatabaseBuilder()
+           .setName(dbName)
+           .create(client, conf);
+ 
+       client.dropType(typeName);
+       Type typ1 = new Type();
+       typ1.setName(typeName);
+       typ1.setFields(new ArrayList<>(2));
+       typ1.getFields().add(
+           new FieldSchema("name", ColumnType.STRING_TYPE_NAME, ""));
+       typ1.getFields().add(
+           new FieldSchema("income", ColumnType.INT_TYPE_NAME, ""));
+       client.createType(typ1);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName)
+           .setCols(typ1.getFields())
+           .addPartCol("ds", ColumnType.DATE_TYPE_NAME)
+           .addPartCol("hr", ColumnType.INT_TYPE_NAME)
+           .setNumBuckets(1)
+           .addBucketCol("name")
+           .addStorageDescriptorParam("test_param_1","Use this for comments etc")
+           .create(client, conf);
+ 
+       Table tbl2 = client.getTable(dbName, tblName);
+       assertEquals(tbl2.getDbName(), dbName);
+       assertEquals(tbl2.getTableName(), tblName);
+       assertEquals(tbl2.getSd().getCols().size(), typ1.getFields().size());
+       assertFalse(tbl2.getSd().isCompressed());
+       assertFalse(tbl2.getSd().isStoredAsSubDirectories());
+       assertEquals(tbl2.getSd().getNumBuckets(), 1);
+ 
+       assertEquals("Use this for comments etc", tbl2.getSd().getParameters()
+           .get("test_param_1"));
+       assertEquals("name", tbl2.getSd().getBucketCols().get(0));
+ 
+       assertNotNull(tbl2.getPartitionKeys());
+       assertEquals(2, tbl2.getPartitionKeys().size());
+       assertEquals(ColumnType.DATE_TYPE_NAME, tbl2.getPartitionKeys().get(0)
+           .getType());
+       assertEquals(ColumnType.INT_TYPE_NAME, tbl2.getPartitionKeys().get(1)
+           .getType());
+       assertEquals("ds", tbl2.getPartitionKeys().get(0).getName());
+       assertEquals("hr", tbl2.getPartitionKeys().get(1).getName());
+ 
+       List<FieldSchema> fieldSchemas = client.getFields(dbName, tblName);
+       assertNotNull(fieldSchemas);
+       assertEquals(fieldSchemas.size(), tbl.getSd().getCols().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemas.contains(fs));
+       }
+ 
+       List<FieldSchema> fieldSchemasFull = client.getSchema(dbName, tblName);
+       assertNotNull(fieldSchemasFull);
+       assertEquals(fieldSchemasFull.size(), tbl.getSd().getCols().size()
+           + tbl.getPartitionKeys().size());
+       for (FieldSchema fs : tbl.getSd().getCols()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+       for (FieldSchema fs : tbl.getPartitionKeys()) {
+         assertTrue(fieldSchemasFull.contains(fs));
+       }
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testComplexTable() failed.");
+       throw e;
+     } finally {
+       client.dropTable(dbName, tblName);
+       boolean ret = client.dropType(typeName);
+       assertTrue("Unable to drop type " + typeName, ret);
+       client.dropDatabase(dbName);
+     }
+   }
+ 
+   @Test
+   public void testTableDatabase() throws Exception {
+     String dbName = "testDb";
+     String tblName_1 = "testTbl_1";
+     String tblName_2 = "testTbl_2";
+ 
+     try {
+       silentDropDatabase(dbName);
+ 
+       String dbLocation =
+           MetastoreConf.getVar(conf, ConfVars.WAREHOUSE) + "_testDB_table_create_";
+       new DatabaseBuilder()
+           .setName(dbName)
+           .setLocation(dbLocation)
+           .create(client, conf);
+       Database db = client.getDatabase(dbName);
+ 
+       Table tbl = new TableBuilder()
+           .setDbName(dbName)
+           .setTableName(tblName_1)
+           .addCol("name", ColumnType.STRING_TYPE_NAME)
+           .addCol("income", ColumnType.INT_TYPE_NAME)
+           .create(client, conf);
+ 
+       tbl = client.getTable(dbName, tblName_1);
+ 
+       Path path = new Path(tbl.getSd().getLocation());
+       System.err.println("Table's location " + path + ", Database's location " + db.getLocationUri());
+       assertEquals("Table location is not a subset of the database location",
+           path.getParent().toString(), db.getLocationUri());
+ 
+     } catch (Exception e) {
+       System.err.println(StringUtils.stringifyException(e));
+       System.err.println("testTableDatabase() failed.");
+       throw e;
+     } finally {
+       silentDropDatabase(dbName);
+     }
+   }
+ 
+ 
+   @Test
+   public void testGetConfigValue() {
+ 
+     String val = "value";
+ 
+     if (!isThriftClient) {
+       try {
+         assertEquals(client.getConfigValue("hive.key1", val), "value1");
+         assertEquals(client.getConfigValue("hive.key2", val), "http://www.example.com");
+         assertEquals(client.getConfigValue("hive.key3", val), "");
+         assertEquals(client.getConfigValue("hive.key4", val), "0");
+         assertEquals(client.getConfigValue("hive.key5", val), val);
+         assertEquals(client.getConfigValue(null, val), val);
+       } catch (TException e) {
+         e.printStackTrace();
+         fail();
+       }
+     }
+ 
+     boolean threwException = false;
+     try {
+       // Attempting to get the password should throw an exception
+       client.getConfigValue("javax.jdo.option.ConnectionPassword", "password");
+     } catch (ConfigValSecurityException e) {
+       threwException = true;
+     } catch (TException e) {
+       e.printStackTrace();
+       fail();
+     }
+     assertTrue(threwException);
+   }
+ 
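+   // Copies server-assigned fields from the partition stored in the metastore onto the locally
+   // built one so that later equality checks pass: the create time and DDL_TIME (only when going
+   // through the Thrift client, which assigns them server-side) and the write ID.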
+   private static void adjust(HiveMetaStoreClient client, Partition part,
 -      String dbName, String tblName) throws TException {
++      String dbName, String tblName, boolean isThriftClient) throws TException {
+     Partition part_get = client.getPartition(dbName, tblName, part.getValues());
 -    part.setCreateTime(part_get.getCreateTime());
 -    part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
++    if (isThriftClient) {
++      part.setCreateTime(part_get.getCreateTime());
++      part.putToParameters(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME, Long.toString(part_get.getCreateTime()));
++    }
++    part.setWriteId(part_get.getWriteId());
+   }
+ 
++
++
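+   // Drops every table in the given database and then the database itself, ignoring databases
+   // that do not exist so tests can call it unconditionally.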
+   private static void silentDropDatabase(String dbName) throws TException {
+     try {
+       for (String tableName : client.getTables(dbName, "*")) {
+         client.dropTable(dbName, tableName);
+       }
+       client.dropDatabase(dbName);
+     } catch (NoSuchObjectException|InvalidOperationException e) {
+       // NOP
+     }
+   }
+ 
+   /**
+    * Tests for list partition by filter functionality.
+    */
+ 
+   @Test
+   public void testPartitionFilter() throws Exception {
+     String dbName = "filterdb";
+     String tblName = "filtertbl";
+ 
+     silentDropDatabase(dbName);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(client, conf);
+ 
+     Table tbl = new TableBuilder()
+         .setDbName(dbName)
+         .setTableName(tblName)
+         .addCol("c1", ColumnType.STRING_TYPE_NAME)
+         .addCol("c2", ColumnType.INT_TYPE_NAME)
+         .addPartCol("p1", ColumnType.STRING_TYPE_NAME)
+         .addPartCol("p2", ColumnType.STRING_TYPE_NAME)
+         .addPartCol("p3", ColumnType.INT_TYPE_NAME)
+         .create(client, conf);
+ 
+     tbl = client.getTable(dbName, tblName);
+ 
+     add_partition(client, tbl, Lists.newArrayList("p11", "p21", "31"), "part1");
+     add_partition(client, tbl, Lists.newArrayList("p11", "p22", "32"), "part2");
+     add_partition(client, tbl, Lists.newArrayList("p12", "p21", "31"), "part3");
+     add_partition(client, tbl, Lists.newArrayList("p12", "p23", "32"), "part4");
+     add_partition(client, tbl, Lists.newArrayList("p13", "p24", "31"), "part5");
+     add_partition(client, tbl, Lists.newArrayList("p13", "p25", "-33"), "part6");
+ 
+     // Test equals operator for strings and integers.
+     checkFilter(client, dbName, tblName, "p1 = \"p11\"", 2);
+     checkFilter(client, dbName, tblName, "p1 = \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p2 = \"p21\"", 2);
+     checkFilter(client, dbName, tblName, "p2 = \"p23\"", 1);
+     checkFilter(client, dbName, tblName, "p3 = 31", 3);
+     checkFilter(client, dbName, tblName, "p3 = 33", 0);
+     checkFilter(client, dbName, tblName, "p3 = -33", 1);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" and p2=\"p22\"", 1);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p2=\"p23\"", 3);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" or p1=\"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 = \"p11\" and p3 = 31", 1);
+     checkFilter(client, dbName, tblName, "p3 = -33 or p1 = \"p12\"", 3);
+ 
+     // Test not-equals operator for strings and integers.
+     checkFilter(client, dbName, tblName, "p1 != \"p11\"", 4);
+     checkFilter(client, dbName, tblName, "p2 != \"p23\"", 5);
+     checkFilter(client, dbName, tblName, "p2 != \"p33\"", 6);
+     checkFilter(client, dbName, tblName, "p3 != 32", 4);
+     checkFilter(client, dbName, tblName, "p3 != 8589934592", 6);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p1 != \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p2 != \"p22\"", 4);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" or p2 != \"p22\"", 5);
+     checkFilter(client, dbName, tblName, "p1 != \"p12\" and p2 != \"p25\"", 3);
+     checkFilter(client, dbName, tblName, "p1 != \"p12\" or p2 != \"p25\"", 6);
+     checkFilter(client, dbName, tblName, "p3 != -33 or p1 != \"p13\"", 5);
+     checkFilter(client, dbName, tblName, "p1 != \"p11\" and p3 = 31", 2);
+     checkFilter(client, dbName, tblName, "p3 != 31 and p1 = \"p12\"", 1);
+ 
+     // Test reverse order.
+     checkFilter(client, dbName, tblName, "31 != p3 and p1 = \"p12\"", 1);
+     checkFilter(client, dbName, tblName, "\"p23\" = p2", 1);
+ 
+     // Test and/or more...
+     checkFilter(client, dbName, tblName,
+         "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\")", 3);
+     checkFilter(client, dbName, tblName,
+        "p1 = \"p11\" or (p1=\"p12\" and p2=\"p21\") Or " +
+        "(p1=\"p13\" aNd p2=\"p24\")", 4);
+     //test for and or precedence
+     checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and (p2=\"p27\" Or p2=\"p21\")", 1);
+     checkFilter(client, dbName, tblName,
+        "p1=\"p12\" and p2=\"p27\" Or p2=\"p21\"", 2);
+ 
+     // Test gt/lt/lte/gte/like for strings.
+     checkFilter(client, dbName, tblName, "p1 > \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 >= \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 < \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p1 <= \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 like \"p1.*\"", 6);
+     checkFilter(client, dbName, tblName, "p2 like \"p.*3\"", 1);
+ 
+     // Test gt/lt/lte/gte for numbers.
+     checkFilter(client, dbName, tblName, "p3 < 0", 1);
+     checkFilter(client, dbName, tblName, "p3 >= -33", 6);
+     checkFilter(client, dbName, tblName, "p3 > -33", 5);
+     checkFilter(client, dbName, tblName, "p3 > 31 and p3 < 32", 0);
+     checkFilter(client, dbName, tblName, "p3 > 31 or p3 < 31", 3);
+     checkFilter(client, dbName, tblName, "p3 > 30 or p3 < 30", 6);
+     checkFilter(client, dbName, tblName, "p3 >= 31 or p3 < -32", 6);
+     checkFilter(client, dbName, tblName, "p3 >= 32", 2);
+     checkFilter(client, dbName, tblName, "p3 > 32", 0);
+ 
+     // Test between
+     checkFilter(client, dbName, tblName, "p1 between \"p11\" and \"p12\"", 4);
+     checkFilter(client, dbName, tblName, "p1 not between \"p11\" and \"p12\"", 2);
+     checkFilter(client, dbName, tblName, "p3 not between 0 and 2", 6);
+     checkFilter(client, dbName, tblName, "p3 between 31 and 32", 5);
+     checkFilter(client, dbName, tblName, "p3 between 32 and 31", 0);
+     checkFilter(client, dbName, tblName, "p3 between -32 and 34 and p3 not between 31 and 32", 0);
+     checkFilter(client, dbName, tblName, "p3 between 1 and 3 or p3 not between 1 and 3", 6);
+     checkFilter(client, dbName, tblName,
+         "p3 between 31 and 32 and p1 between \"p12\" and \"p14\"", 3);
+ 
+     //Test for setting the maximum partition count
+     List<Partition> partitions = client.listPartitionsByFilter(dbName,
+         tblName, "p1 >= \"p12\"", (short) 2);
+     assertEquals("User specified row limit for partitions",
+         2, partitions.size());
+ 
+     //Negative tests
+     Exception me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "p3 >= \"p12\"", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Filter on int partition key", me.getMessage().contains(
+           "Filtering is supported only on partition keys of type string"));
+ 
+     me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "c1 >= \"p12\"", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Filter on invalid key", me.getMessage().contains(
+           "<c1> is not a partitioning key for the table"));
+ 
+     me = null;
+     try {
+       client.listPartitionsByFilter(dbName,
+           tblName, "c1 >= ", (short) -1);
+     } catch(MetaException e) {
+       me = e;
+     }
+     assertNotNull(me);
+     assertTrue("Invalid filter string", me.getMessage().contain

<TRUNCATED>

[20/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0000000,8ff056f..9bee0db
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@@ -1,0 -1,2532 +1,2532 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ 
+ import java.nio.ByteBuffer;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.EmptyStackException;
+ import java.util.HashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Stack;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ScheduledExecutorService;
+ import java.util.concurrent.ThreadFactory;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.regex.Matcher;
+ import java.util.regex.Pattern;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.DatabaseName;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.Deadline;
+ import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
+ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
+ import org.apache.hadoop.hive.metastore.RawStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.hadoop.hive.metastore.utils.StringUtils;
+ import org.apache.thrift.TException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ // TODO filter->expr
+ // TODO functionCache
+ // TODO constraintCache
+ // TODO need sd nested copy?
+ // TODO String intern
+ // TODO monitor event queue
+ // TODO initial load slow?
+ // TODO size estimation
+ 
+ public class CachedStore implements RawStore, Configurable {
+   private static ScheduledExecutorService cacheUpdateMaster = null;
+   private static List<Pattern> whitelistPatterns = null;
+   private static List<Pattern> blacklistPatterns = null;
+   // Default value set to 100 milliseconds for test purposes
+   private static long DEFAULT_CACHE_REFRESH_PERIOD = 100;
+   // Time after which metastore cache is updated from metastore DB by the background update thread
+   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
+   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+   private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
+   private RawStore rawStore = null;
+   private Configuration conf;
+   private PartitionExpressionProxy expressionProxy = null;
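+   // A single cache instance shared by every CachedStore in this JVM (hence static final)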
+   private static final SharedCache sharedCache = new SharedCache();
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName());
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+     startCacheUpdateService(conf, false, true);
+   }
+ 
+   /**
+    * Similar to setConf, but used from within the tests.
+    * Unlike setConf, this does not start the background thread for prewarm and update.
+    * @param conf configuration to use
+    */
+   void setConfForTest(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+   }
+ 
+   private void setConfInternal(Configuration conf) {
+     String rawStoreClassName =
+         MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+     if (rawStore == null) {
+       try {
+         rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance();
+       } catch (Exception e) {
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+     rawStore.setConf(conf);
+     Configuration oldConf = this.conf;
+     this.conf = conf;
+     if (expressionProxy != null && conf != oldConf) {
+       LOG.warn("Unexpected setConf when we were already configured");
+     } else {
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+     }
+   }
+ 
+   private void initSharedCache(Configuration conf) {
+     long maxSharedCacheSizeInBytes =
+         MetastoreConf.getSizeVar(conf, ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY);
+     sharedCache.initialize(maxSharedCacheSizeInBytes);
+     if (maxSharedCacheSizeInBytes > 0) {
+       LOG.info("Maximum memory that the cache will use: {} GB",
+           maxSharedCacheSizeInBytes / (1024 * 1024 * 1024));
+     }
+   }
+ 
+   @VisibleForTesting
+   /**
+    * This initializes the caches in SharedCache by getting the objects from Metastore DB via
+    * ObjectStore and populating the respective caches
+    */
+   static void prewarm(RawStore rawStore) {
+     if (isCachePrewarmed.get()) {
+       return;
+     }
+     long startTime = System.nanoTime();
+     LOG.info("Prewarming CachedStore");
+     while (!isCachePrewarmed.get()) {
+       // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy
+       Deadline.registerIfNot(1000000);
+       Collection<String> catalogsToCache;
+       try {
+         catalogsToCache = catalogsToCache(rawStore);
+         LOG.info("Going to cache catalogs: "
+             + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+         List<Catalog> catalogs = new ArrayList<>(catalogsToCache.size());
+         for (String catName : catalogsToCache) {
+           catalogs.add(rawStore.getCatalog(catName));
+         }
+         sharedCache.populateCatalogsInCache(catalogs);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.warn("Failed to populate catalogs in cache, going to try again", e);
+         // try again
+         continue;
+       }
+       LOG.info("Finished prewarming catalogs, starting on databases");
+       List<Database> databases = new ArrayList<>();
+       for (String catName : catalogsToCache) {
+         try {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size());
+           for (String dbName : dbNames) {
+             try {
+               databases.add(rawStore.getDatabase(catName, dbName));
+             } catch (NoSuchObjectException e) {
+               // Continue with next database
+               LOG.warn("Failed to cache database "
+                   + DatabaseName.getQualified(catName, dbName) + ", moving on", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e);
+         }
+       }
+       sharedCache.populateDatabasesInCache(databases);
+       LOG.info(
+           "Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache");
+       int numberOfDatabasesCachedSoFar = 0;
+       for (Database db : databases) {
+         String catName = StringUtils.normalizeIdentifier(db.getCatalogName());
+         String dbName = StringUtils.normalizeIdentifier(db.getName());
+         List<String> tblNames;
+         try {
+           tblNames = rawStore.getAllTables(catName, dbName);
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache tables for database "
+               + DatabaseName.getQualified(catName, dbName) + ", moving on");
+           // Continue with next database
+           continue;
+         }
+         tblsPendingPrewarm.addTableNamesForPrewarming(tblNames);
+         int totalTablesToCache = tblNames.size();
+         int numberOfTablesCachedSoFar = 0;
+         while (tblsPendingPrewarm.hasMoreTablesToPrewarm()) {
+           try {
+             String tblName =
+                 StringUtils.normalizeIdentifier(tblsPendingPrewarm.getNextTableNameToPrewarm());
+             if (!shouldCacheTable(catName, dbName, tblName)) {
+               continue;
+             }
+             Table table;
+             try {
+               table = rawStore.getTable(catName, dbName, tblName);
+             } catch (MetaException e) {
+               // The table may have been dropped while we were fetching the table list of
+               // the database; in that case, continue with the next table
+               continue;
+             }
+             List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+             try {
+               ColumnStatistics tableColStats = null;
+               List<Partition> partitions = null;
+               List<ColumnStatistics> partitionColStats = null;
+               AggrStats aggrStatsAllPartitions = null;
+               AggrStats aggrStatsAllButDefaultPartition = null;
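+               // For a partitioned table also fetch its partitions, the per-partition column
+               // stats and the aggregate stats; for an unpartitioned table only the
+               // table-level column stats are needed.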
+               if (table.isSetPartitionKeys()) {
+                 Deadline.startTimer("getPartitions");
+                 partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+                 Deadline.stopTimer();
+                 List<String> partNames = new ArrayList<>(partitions.size());
+                 for (Partition p : partitions) {
+                   partNames.add(Warehouse.makePartName(table.getPartitionKeys(), p.getValues()));
+                 }
+                 if (!partNames.isEmpty()) {
+                   // Get partition column stats for this table
+                   Deadline.startTimer("getPartitionColumnStatistics");
+                   partitionColStats = rawStore.getPartitionColumnStatistics(catName, dbName,
+                       tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Get aggregate stats for all partitions of a table and for all but default
+                   // partition
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllPartitions =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Remove default partition from partition names and get aggregate
+                   // stats again
+                   List<FieldSchema> partKeys = table.getPartitionKeys();
+                   String defaultPartitionValue =
+                       MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+                   List<String> partCols = new ArrayList<>();
+                   List<String> partVals = new ArrayList<>();
+                   for (FieldSchema fs : partKeys) {
+                     partCols.add(fs.getName());
+                     partVals.add(defaultPartitionValue);
+                   }
+                   String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+                   partNames.remove(defaultPartitionName);
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllButDefaultPartition =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                 }
+               } else {
+                 Deadline.startTimer("getTableColumnStatistics");
+                 tableColStats =
+                     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+                 Deadline.stopTimer();
+               }
++              // TODO## should this take write ID into account? or at least cache write ID to verify?
+               // If the table could not be cached due to the memory limit, stop the prewarm
+               boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions,
+                   partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition);
+               if (isSuccess) {
+                 LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName);
+               } else {
+                 LOG.info(
+                     "Unable to cache Database: {}'s Table: {}, since the cache memory is full. "
+                         + "Will stop attempting to cache any more tables.",
+                     dbName, tblName);
+                 completePrewarm(startTime);
+                 return;
+               }
+             } catch (MetaException | NoSuchObjectException e) {
+               // Continue with next table
+               continue;
+             }
+             LOG.debug("Processed database: {}'s table: {}. Cached {} / {} tables so far.", dbName,
+                 tblName, ++numberOfTablesCachedSoFar, totalTablesToCache);
+           } catch (EmptyStackException e) {
+             // We've prewarmed this database, continue with the next one
+             continue;
+           }
+         }
+         LOG.debug("Processed database: {}. Cached {} / {} databases so far.", dbName,
+             ++numberOfDatabasesCachedSoFar, databases.size());
+       }
+       completePrewarm(startTime);
+     }
+   }
+ 
+   private static void completePrewarm(long startTime) {
+     isCachePrewarmed.set(true);
+     LOG.info("CachedStore initialized");
+     long endTime = System.nanoTime();
+     LOG.info("Time taken in prewarming = " + (endTime - startTime) / 1000000 + "ms");
+     sharedCache.completeTableCachePrewarm();
+   }
+ 
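+   /**
+    * Table names that the prewarm loop still has to cache. getTable() moves a table that is
+    * being queried to the top of this stack via prioritizeTableForPrewarm() so that it gets
+    * cached first.
+    */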
+   static class TablesPendingPrewarm {
+     private Stack<String> tableNames = new Stack<>();
+ 
+     private synchronized void addTableNamesForPrewarming(List<String> tblNames) {
+       tableNames.clear();
+       if (tblNames != null) {
+         tableNames.addAll(tblNames);
+       }
+     }
+ 
+     private synchronized boolean hasMoreTablesToPrewarm() {
+       return !tableNames.empty();
+     }
+ 
+     private synchronized String getNextTableNameToPrewarm() {
+       return tableNames.pop();
+     }
+ 
+     private synchronized void prioritizeTableForPrewarm(String tblName) {
+       // If the table is in the pending prewarm list, move it to the top
+       if (tableNames.remove(tblName)) {
+         tableNames.push(tblName);
+       }
+     }
+   }
+ 
+   @VisibleForTesting
+   static void setCachePrewarmedState(boolean state) {
+     isCachePrewarmed.set(state);
+   }
+ 
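+   // Lazily compiles the configured whitelist/blacklist patterns that are used to decide
+   // which tables should be cached.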
+   private static void initBlackListWhiteList(Configuration conf) {
+     if (whitelistPatterns == null || blacklistPatterns == null) {
+       whitelistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST));
+       blacklistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST));
+     }
+   }
+ 
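+   // Returns the catalogs that should be cached: the configured list if one is set,
+   // otherwise all catalogs known to the RawStore.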
+   private static Collection<String> catalogsToCache(RawStore rs) throws MetaException {
+     Collection<String> confValue =
+         MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE);
+     if (confValue == null || confValue.isEmpty() ||
+         (confValue.size() == 1 && confValue.contains(""))) {
+       return rs.getCatalogs();
+     } else {
+       return confValue;
+     }
+   }
+ 
+   /**
+    * Starts a background thread, which initially populates the SharedCache and later
+    * periodically gets updates from the metastore DB.
+    *
+    * @param conf configuration for the cache update service
+    * @param runOnlyOnce if true, schedule a single update run instead of a periodic one
+    *                    (used by tests that control the background update thread)
+    * @param shouldRunPrewarm whether the scheduled work should run prewarm
+    */
+   @VisibleForTesting
+   static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce,
+       boolean shouldRunPrewarm) {
+     if (cacheUpdateMaster == null) {
+       initBlackListWhiteList(conf);
+       if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+         cacheRefreshPeriodMS = MetastoreConf.getTimeVar(conf,
+             ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS);
+       }
+       LOG.info("CachedStore: starting cache update service (run every {} ms)", cacheRefreshPeriodMS);
+       cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+         @Override
+         public Thread newThread(Runnable r) {
+           Thread t = Executors.defaultThreadFactory().newThread(r);
+           t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId());
+           t.setDaemon(true);
+           return t;
+         }
+       });
+       if (!runOnlyOnce) {
+         cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+             cacheRefreshPeriodMS, TimeUnit.MILLISECONDS);
+       }
+     }
+     if (runOnlyOnce) {
+       // Some tests control the execution of the background update thread
+       cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+           TimeUnit.MILLISECONDS);
+     }
+   }
+ 
+   @VisibleForTesting
+   static synchronized boolean stopCacheUpdateService(long timeout) {
+     boolean tasksStoppedBeforeShutdown = false;
+     if (cacheUpdateMaster != null) {
+       LOG.info("CachedStore: shutting down cache update service");
+       try {
+         tasksStoppedBeforeShutdown =
+             cacheUpdateMaster.awaitTermination(timeout, TimeUnit.MILLISECONDS);
+       } catch (InterruptedException e) {
+         LOG.info("CachedStore: cache update service was interrupted while waiting for tasks to "
+             + "complete before shutting down. Will make a hard stop now.");
+       }
+       cacheUpdateMaster.shutdownNow();
+       cacheUpdateMaster = null;
+     }
+     return tasksStoppedBeforeShutdown;
+   }
+ 
+   @VisibleForTesting
+   static void setCacheRefreshPeriod(long time) {
+     cacheRefreshPeriodMS = time;
+   }
+ 
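+   /**
+    * Background work scheduled by startCacheUpdateService(): it prewarms the cache when
+    * requested and otherwise refreshes the cached catalogs, databases, tables, partitions
+    * and statistics from the underlying RawStore.
+    */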
+   static class CacheUpdateMasterWork implements Runnable {
+     private boolean shouldRunPrewarm = true;
+     private final RawStore rawStore;
+ 
+     CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) {
+       this.shouldRunPrewarm = shouldRunPrewarm;
+       String rawStoreClassName =
+           MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+       try {
+         rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance();
+         rawStore.setConf(conf);
+       } catch (InstantiationException | IllegalAccessException | MetaException e) {
+         // MetaException here really means ClassNotFound (see the utility method).
+         // So, if any of these happen, that means we can never succeed.
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+ 
+     @Override
+     public void run() {
+       if (!shouldRunPrewarm) {
+         // TODO: prewarm and update can probably be merged.
+         update();
+       } else {
+         try {
+           prewarm(rawStore);
+         } catch (Exception e) {
+           LOG.error("Prewarm failure", e);
+           return;
+         }
+       }
+     }
+ 
+     void update() {
+       Deadline.registerIfNot(1000000);
+       LOG.debug("CachedStore: updating cached objects");
+       try {
+         for (String catName : catalogsToCache(rawStore)) {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           // Update the database in cache
+           updateDatabases(rawStore, catName, dbNames);
+           for (String dbName : dbNames) {
+             // Update the tables in cache
+             updateTables(rawStore, catName, dbName);
+             List<String> tblNames;
+             try {
+               tblNames = rawStore.getAllTables(catName, dbName);
+             } catch (MetaException e) {
+               // Continue with next database
+               continue;
+             }
+             for (String tblName : tblNames) {
+               if (!shouldCacheTable(catName, dbName, tblName)) {
+                 continue;
+               }
+               // Update the table column stats for a table in cache
+               updateTableColStats(rawStore, catName, dbName, tblName);
+               // Update the partitions for a table in cache
+               updateTablePartitions(rawStore, catName, dbName, tblName);
+               // Update the partition col stats for a table in cache
+               updateTablePartitionColStats(rawStore, catName, dbName, tblName);
+               // Update aggregate partition column stats for a table in cache
+               updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName);
+             }
+           }
+         }
+         sharedCache.incrementUpdateCount();
+       } catch (MetaException e) {
+         LOG.error("Updating CachedStore: error while refreshing the cache; skipping this iteration", e);
+       }
+     }
+ 
+     private void updateDatabases(RawStore rawStore, String catName, List<String> dbNames) {
+       // Prepare the list of databases
+       List<Database> databases = new ArrayList<>();
+       for (String dbName : dbNames) {
+         Database db;
+         try {
+           db = rawStore.getDatabase(catName, dbName);
+           databases.add(db);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Updating CachedStore: database - " + catName + "." + dbName
+               + " does not exist.", e);
+         }
+       }
+       sharedCache.refreshDatabasesInCache(databases);
+     }
+ 
+     private void updateTables(RawStore rawStore, String catName, String dbName) {
+       List<Table> tables = new ArrayList<>();
+       try {
+         List<String> tblNames = rawStore.getAllTables(catName, dbName);
+         for (String tblName : tblNames) {
+           if (!shouldCacheTable(catName, dbName, tblName)) {
+             continue;
+           }
+           Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName));
+           tables.add(table);
+         }
+         sharedCache.refreshTablesInCache(catName, dbName, tables);
+       } catch (MetaException e) {
+         LOG.debug("Unable to refresh cached tables for database: " + dbName, e);
+       }
+     }
+ 
+     private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         if (!table.isSetPartitionKeys()) {
+           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+           Deadline.startTimer("getTableColumnStatistics");
++          // TODO## should this take write ID into account? or at least cache write ID to verify?
+           ColumnStatistics tableColStats =
+               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+           Deadline.stopTimer();
+           if (tableColStats != null) {
++            // TODO## should this take write ID into account? or at least cache write ID to verify?
+             sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
+                 StringUtils.normalizeIdentifier(dbName),
+                 StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
+           }
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Unable to refresh table column stats for table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Deadline.startTimer("getPartitions");
+         List<Partition> partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName),
+             StringUtils.normalizeIdentifier(tblName), partitions);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         // Get partition column stats for this table
+         Deadline.startTimer("getPartitionColumnStatistics");
++        // TODO## should this take write ID into account? or at least cache write ID to verify?
+         List<ColumnStatistics> partitionColStats =
+             rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partition column stats of table: " + tblName, e);
+       }
+     }
+ 
+     // Update cached aggregate stats for all partitions of a table and for all
+     // but default partition
+     private void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName,
+                                                        String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         if ((partNames != null) && (partNames.size() > 0)) {
+           Deadline.startTimer("getAggregateStatsForAllPartitions");
+           AggrStats aggrStatsAllPartitions =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           // Remove default partition from partition names and get aggregate stats again
+           List<FieldSchema> partKeys = table.getPartitionKeys();
+           String defaultPartitionValue =
+               MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+           List<String> partCols = new ArrayList<>();
+           List<String> partVals = new ArrayList<>();
+           for (FieldSchema fs : partKeys) {
+             partCols.add(fs.getName());
+             partVals.add(defaultPartitionValue);
+           }
+           String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+           partNames.remove(defaultPartitionName);
+           Deadline.startTimer("getAggregateStatsForAllPartitionsExceptDefault");
+           AggrStats aggrStatsAllButDefaultPartition =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions,
+               aggrStatsAllButDefaultPartition);
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName,
+             e);
+       }
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return rawStore.getConf();
+   }
+ 
+   @Override
+   public void shutdown() {
+     rawStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return rawStore.openTransaction();
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return rawStore.commitTransaction();
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return rawStore.isActiveTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     rawStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     rawStore.createCatalog(cat);
+     sharedCache.addCatalogToCache(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     rawStore.alterCatalog(catName, cat);
+     sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalog(catalogName);
+     }
+     Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName));
+     if (cat == null) {
+       throw new NoSuchObjectException("Catalog " + catalogName + " does not exist");
+     }
+     return cat;
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalogs();
+     }
+     return sharedCache.listCachedCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     rawStore.dropCatalog(catalogName);
+     catalogName = catalogName.toLowerCase();
+     sharedCache.removeCatalogFromCache(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     rawStore.createDatabase(db);
+     sharedCache.addDatabaseToCache(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabase(catName, dbName);
+     }
+     dbName = dbName.toLowerCase();
+     Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName));
+     if (db == null) {
+       throw new NoSuchObjectException("Database " + catName + "." + dbName + " does not exist");
+     }
+     return db;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.dropDatabase(catName, dbName);
+     if (succ) {
+       sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName));
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.alterDatabase(catName, dbName, db);
+     if (succ) {
+       sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName), db);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabases(catName, pattern);
+     }
+     return sharedCache.listCachedDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getAllDatabases(catName);
+     }
+     return sharedCache.listCachedDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return rawStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return rawStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return rawStore.dropType(typeName);
+   }
+ 
+   private void validateTableType(Table tbl) {
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
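+     // (e.g. a MANAGED_TABLE whose parameters contain EXTERNAL=TRUE is normalized to
+     // EXTERNAL_TABLE, and vice versa)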
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     tbl.setTableType(tableType);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     rawStore.createTable(tbl);
+     String catName = normalizeIdentifier(tbl.getCatName());
+     String dbName = normalizeIdentifier(tbl.getDbName());
+     String tblName = normalizeIdentifier(tbl.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     validateTableType(tbl);
+     sharedCache.addTableToCache(catName, dbName, tblName, tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tblName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropTable(catName, dbName, tblName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tblName) throws MetaException {
++    return getTable(catName, dbName, tblName, -1, null);
++  }
++
++  // TODO: if writeIdList is not null, check isolation level compliance for SVS,
++  // possibly with getTableFromCache() with table snapshot in cache.
++  @Override
++  public Table getTable(String catName, String dbName, String tblName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (tbl == null) {
++    if (tbl == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or a writeIdList was passed and the
+       // cached entry cannot be validated against it yet (see the TODO above), so read
+       // through to the raw store.
+       // If the prewarm thread is working on this table's database,
+       // let's move this table to the top of tblNamesBeingPrewarmed stack,
+       // so that it gets loaded to the cache faster and is available for subsequent requests
+       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     tbl.unsetPrivileges();
+     tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+     return tbl;
+   }
+ 
+   @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartition(part);
+     if (succ) {
+       String dbName = normalizeIdentifier(part.getDbName());
+       String tblName = normalizeIdentifier(part.getTableName());
+       String catName = part.isSetCatName() ? normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME;
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionsToCache(catName, dbName, tblName, parts);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec,
+       boolean ifNotExists) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+       while (iterator.hasNext()) {
+         Partition part = iterator.next();
+         sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+       }
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
++    return getPartition(catName, dbName, tblName, part_vals, -1, null);
++  }
++
++  // TODO: the same as getTable()
++  @Override
++  public Partition getPartition(String catName, String dbName, String tblName,
++                                List<String> part_vals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
 -    if (part == null) {
++    if (part == null || writeIdList != null) {
+       // Either the containing table is not yet loaded in cache, or a writeIdList was
+       // passed and the cached entry cannot be validated against it, so read through to
+       // the raw store.
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     return part;
+   }
+ 
+   @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tblName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partition is not yet loaded in cache
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     rawStore.dropPartitions(catName, dbName, tblName, partNames);
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     List<List<String>> partVals = new ArrayList<>();
+     for (String partName : partNames) {
+       partVals.add(partNameToVals(partName));
+     }
+     sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tblName, int max)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partitions is not yet loaded in cache
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     List<Partition> parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max);
+     return parts;
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String tblName, Table newTable)
 -      throws InvalidObjectException, MetaException {
 -    rawStore.alterTable(catName, dbName, tblName, newTable);
++  public void alterTable(String catName, String dbName, String tblName, Table newTable,
++      long txnId, String validWriteIds) throws InvalidObjectException, MetaException {
++    rawStore.alterTable(catName, dbName, tblName, newTable, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     String newTblName = normalizeIdentifier(newTable.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName) &&
+         !shouldCacheTable(catName, dbName, newTblName)) {
+       return;
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return;
+     }
+     if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache and the new table can also be cached
+       sharedCache.alterTableInCache(catName, dbName, tblName, newTable);
+     } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is *not* in the cache but the new table can be cached
+       sharedCache.addTableToCache(catName, dbName, newTblName, newTable);
+     } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache but the new table *cannot* be cached
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     rawStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, (short) -1);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern, tableType);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+     // TODO Check if all required tables are allowed, if so, get it from cache
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+     }
+     return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbNames),
+         StringUtils.normalizeIdentifier(tableNames), tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tblNames)
+       throws MetaException, UnknownDBException {
+     dbName = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean missSomeInCache = false;
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         missSomeInCache = true;
+         break;
+       }
+     }
+     if (!isCachePrewarmed.get() || missSomeInCache) {
+       return rawStore.getTableObjectsByName(catName, dbName, tblNames);
+     }
+     List<Table> tables = new ArrayList<>();
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (tbl == null) {
+         tbl = rawStore.getTable(catName, dbName, tblName);
+       }
+       tables.add(tbl);
+     }
+     return tables;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getAllTables(catName, dbName);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName));
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              short max_tables)
+       throws MetaException, UnknownDBException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), filter, max_tables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName,
+       short max_parts) throws MetaException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     List<String> partitionNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) {
+       if (max_parts == -1 || count < max_parts) {
+         partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partitionNames;
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+       List<FieldSchema> order, long maxParts) throws MetaException {
+     throw new UnsupportedOperationException();
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -                             Partition newPart) throws InvalidObjectException, MetaException {
 -    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++                             Partition newPart, long queryTxnId, String queryValidWriteIds)
++                                 throws InvalidObjectException, MetaException {
++    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -                              List<List<String>> partValsList, List<Partition> newParts)
++                              List<List<String>> partValsList, List<Partition> newParts,
++                              long writeId, long txnId, String validWriteIds)
+       throws InvalidObjectException, MetaException {
 -    rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++    rawStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
++    // TODO: modify the following method for the case when writeIdList != null.
+     sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
+   }
+ 
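+   // Adds the names of all cached partitions of the table to 'result' and lets the
+   // expression proxy prune them by the partition filter expression; returns whether the
+   // pruned result may still contain partitions that do not match (unknown partitions).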
+   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+       String defaultPartName, short maxParts, List<String> result, SharedCache sharedCache)
+       throws MetaException, NoSuchObjectException {
+     List<Partition> parts =
+         sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()),
+             StringUtils.normalizeIdentifier(table.getDbName()),
+             StringUtils.normalizeIdentifier(table.getTableName()), maxParts);
+     for (Partition part : parts) {
+       result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+     }
+     if (defaultPartName == null || defaultPartName.isEmpty()) {
+       defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     }
+     return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName,
+         result);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr,
+         defaultPartitionName, maxParts, partNames, sharedCache);
+     for (String partName : partNames) {
+       Partition part =
+           sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+       if (part != null) {
+         result.add(part);
+       }
+     }
+     return hasUnknownPartitions;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames,
+         sharedCache);
+     return partNames.size();
+   }
+ 
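+   // Converts a partition name into its list of values,
+   // e.g. "ds=2018-07-25/hr=08%3A00" becomes ["2018-07-25", "08:00"].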
+   private static List<String> partNameToVals(String name) {
+     if (name == null) {
+       return null;
+     }
+     List<String> vals = new ArrayList<>();
+     String[] kvp = name.split("/");
+     for (String kv : kvp) {
+       vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
+     }
+     return vals;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     for (String partName : partNames) {
+       Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+       if (part != null) {
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.addRole(rowName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName,
+       PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName,
+       String tableName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName,
+       String tableName, String partition, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName,
+       String tableName, String partitionName, String columnName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName) {
+     return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName) {
+     return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName,
+       String columnName) {
+     return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return rawStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return rawStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return rawStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals);
+     if (p != null) {
+       String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+           userName, groupNames);
+       p.setPrivileges(privs);
+     }
+     return p;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+             userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     List<String> partNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partNames;
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs =
+             getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics colStats)
++  public boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 -    boolean succ = rawStore.updateTableColumnStatistics(colStats);
++    boolean succ = rawStore.updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
+           getDefaultCatalog(conf);
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (table == null) {
+         // The table is not yet loaded in cache
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
+       sharedCache.alterTableInCache(catName, dbName, tblName, table);
+       sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
++  }
++
++  // TODO: the same as getTable()
++  @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tblName, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // The table is not yet loaded in cache, or a write ID list was supplied,
+       // so fall back to the raw store
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs =
+         sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames);
+     return new ColumnStatistics(csd, colStatObjs);
+   }
+ 
+   @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
 -  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
++  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
++      long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
 -    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
++    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, txnId, validWriteIds, writeId);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       Partition part = getPartition(catName, dbName, tblName, partVals);
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(part.getParameters(), colNames);
+       sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part);
+       sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj());
+     }
+     return succ;
+   }
+ 
+   @Override
+   // TODO: calculate from cached values.
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
+       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return rawStore.getPartitionColumnStatistics(
++        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName,
+       List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ =
+         rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
++  }
++
++  @Override
++  // TODO: the same as getTable() for transactional stats.
++  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++                                      List<String> partNames, List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     List<ColumnStatisticsObj> colStats;
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // The table is not yet loaded in cache, or a write ID list was supplied,
+       // so fall back to the raw store
 -      return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+     if (partNames.size() == allPartNames.size()) {
+       colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL);
+       if (colStats != null) {
+         return new AggrStats(colStats, partNames.size());
+       }
+     } else if (partNames.size() == (allPartNames.size() - 1)) {
+       String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+       if (!partNames.contains(defaultPartitionName)) {
+         colStats =
+             sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT);
+         if (colStats != null) {
+           return new AggrStats(colStats, partNames.size());
+         }
+       }
+     }
+     LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}",
+         tblName, partNames, colNames);
+     MergedColumnStatsForPartitions mergedColStats =
+         mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache);
+     return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound());
+   }
+ 
+   private MergedColumnStatsForPartitions mergeColStatsForPartitions(
+       String catName, String dbName, String tblName, List<String> partNames, List<String> colNames,
+       SharedCache sharedCache) throws MetaException {
+     final boolean useDensityFunctionForNDVEstimation =
+         MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION);
+     final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
+     Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap = new HashMap<>();
+     boolean areAllPartsFound = true;
+     long partsFound = 0;
+     for (String colName : colNames) {
+       long partsFoundForColumn = 0;
+       ColumnStatsAggregator colStatsAggregator = null;
+       List<ColStatsObjWithSourceInfo> colStatsWithPartInfoList = new ArrayList<>();
+       for (String partName : partNames) {
+         ColumnStatisticsObj colStatsForPart =
+             sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName);
+         if (colStatsForPart != null) {
+           ColStatsObjWithSourceInfo colStatsWithPartInfo =
+               new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName);
+           colStatsWithPartInfoList.add(colStatsWithPartInfo);
+           if (colStatsAggregator == null) {
+             colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(
+                 colStatsForPart.getStatsData().getSetField(), useDensityFunctionForNDVEstimation,
+                 ndvTuner);
+           }
+           partsFoundForColumn++;
+         } else {
+           LOG.debug(
+               "Stats not found in CachedStore for: dbName={} tblName={} partName={} colName={}",
+               dbName, tblName, partName, colName);
+         }
+       }
+       if (colStatsWithPartInfoList.size() > 0) {
+         colStatsMap.put(colStatsAggregator, colStatsWithPartInfoList);
+       }
+       if (partsFoundForColumn == partNames.size()) {
+         partsFound++;
+       }
+       if (colStatsMap.size() < 1) {
+         LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames= ", dbName,
+             tblName, partNames, colNames);
+         return new MergedColumnStatsForPartitions(new ArrayList<ColumnStatisticsObj>(), 0);
+       }
+     }
+     // Note that enableBitVector does not apply here: each ColumnStatisticsObj
+     // indicates whether its bitvector is null, and the aggregation logic handles both cases.
+     return new MergedColumnStatsForPartitions(MetaStoreUtils.aggrPartitionStats(colStatsMap,
+         partNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner), partsFound);
+   }
+ 
+   class MergedColumnStatsForPartitions {
+     List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>();
+     long partsFound;
+ 
+     MergedColumnStatsForPartitions(List<ColumnStatisticsObj> colStats, long partsFound) {
+       this.colStats = colStats;
+       this.partsFound = partsFound;
+     }
+ 
+     List<ColumnStatisticsObj> getColStats() {
+       return colStats;
+     }
+ 
+     long getPartsFound() {
+       return partsFound;
+     }
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return rawStore.cleanupEvents();
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return rawStore.addToken(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return rawStore.removeToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return rawStore.getToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+    

<TRUNCATED>

[09/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 0000000,2454479..a5e6918
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@@ -1,0 -1,1226 +1,1268 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -
 -import java.nio.ByteBuffer;
 -import java.util.ArrayList;
 -import java.util.Collections;
 -import java.util.List;
 -import java.util.Map;
 -
 -import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
++import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
++import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
++import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
++import org.apache.hadoop.hive.metastore.api.ISchemaName;
++import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
++
++import java.nio.ByteBuffer;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.Map;
++
++import org.apache.hadoop.conf.Configurable;
++import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * A wrapper around {@link org.apache.hadoop.hive.metastore.ObjectStore}
+  * with the ability to control the result of commitTransaction().
+  * All other functions simply delegate to an embedded ObjectStore object.
+  * Ideally, we should have just extended ObjectStore instead of using
+  * delegation.  However, since HiveMetaStore uses a Proxy, this class must
+  * not inherit from any other class.
+  */
+ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
+ 
+   private final ObjectStore objectStore;
+   public DummyRawStoreControlledCommit() {
+     objectStore = new ObjectStore();
+   }
+ 
+  /**
+   * If true, commitTransaction() simply delegates to the underlying ObjectStore.
+   * If false, commitTransaction() immediately returns false.
+   */
+   private static boolean shouldCommitSucceed = true;
+   public static void setCommitSucceed(boolean flag) {
+     shouldCommitSucceed = flag;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     if (shouldCommitSucceed) {
+       return objectStore.commitTransaction();
+     } else {
+       return false;
+     }
+   }
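
A minimal, hypothetical usage sketch of the commit-control switch described in the comment
above (not part of this diff; the Configuration object conf and JUnit's Assert are assumed to
be available in the surrounding test):

    // Hypothetical test fragment; only the class and method names defined above come from the patch.
    DummyRawStoreControlledCommit store = new DummyRawStoreControlledCommit();
    store.setConf(conf);                                      // standard metastore Configuration (assumed)
    DummyRawStoreControlledCommit.setCommitSucceed(false);    // flip the static switch
    org.junit.Assert.assertFalse(store.commitTransaction());  // returns false without asking ObjectStore to commit
    DummyRawStoreControlledCommit.setCommitSucceed(true);     // restore the default for later tests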
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   // All remaining functions simply delegate to objectStore
+ 
+   @Override
+   public Configuration getConf() {
+     return objectStore.getConf();
+   }
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     objectStore.setConf(conf);
+   }
+ 
+   @Override
+   public void shutdown() {
+     objectStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return objectStore.openTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     objectStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     objectStore.createCatalog(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     objectStore.alterCatalog(catName, cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return objectStore.getCatalog(catalogName);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return objectStore.getCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     objectStore.dropCatalog(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     objectStore.createDatabase(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     return objectStore.getDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.dropDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+ 
+     return objectStore.alterDatabase(catName, dbName, db);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     return objectStore.getDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     return objectStore.getAllDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return objectStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return objectStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return objectStore.dropType(typeName);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     objectStore.createTable(tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropTable(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return objectStore.getTable(catName, dbName, tableName);
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
++      throws MetaException {
++    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartition(part);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName,
++                                List<String> partVals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitions(catName, dbName, tableName, max);
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String name, Table newTable)
++  public void alterTable(String catName, String dbName, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
 -    objectStore.alterTable(catName, dbName, name, newTable);
++    objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+     return objectStore.getTableObjectsByName(catName, dbName, tableNames);
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return objectStore.getAllTables(catName, dbName);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+       short maxTables) throws MetaException, UnknownDBException {
+     return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+       throws MetaException {
+     return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+       String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter,
+       boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -      Partition newPart) throws InvalidObjectException, MetaException {
 -    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++      Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -      List<List<String>> partValsList, List<Partition> newParts)
 -      throws InvalidObjectException, MetaException {
 -    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++      List<List<String>> partValsList, List<Partition> newParts,
++      long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
+                                       String filter) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
+                                       byte[] expr) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return objectStore.getPartitionsByExpr(catName,
+         dbName, tblName, expr, defaultPartitionName, maxParts, result);
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.addRole(rowName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.grantRole(role, userName, principalType, grantor, grantorType,
+         grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName,
+         columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+     return objectStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return objectStore.listAllTableGrants(principalName, principalType,
+         catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+     return objectStore.listPrincipalPartitionGrants(principalName, principalType,
+         catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+     return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
+         catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName, String tableName,
+       List<String> partVals, String partName, String columnName) {
+     return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
+         catName, dbName, tableName, partVals, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+           throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return objectStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return objectStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+     return objectStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return objectStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return objectStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return objectStore.cleanupEvents();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalDBGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return objectStore.listGlobalGrantsAll();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return objectStore.listDBGrantsAll(catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName,
+       String partitionName, String columnName) {
+     return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return objectStore.listTableGrantsAll(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName,
+       String partitionName) {
+     return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName,
+       String columnName) {
+     return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
++                                                   String tableName, List<String> colNames,
++                                                   long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getTableColumnStatistics(
++        catName, dbName, tableName, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName);
+   }
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName,
+         partVals, colName);
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
++  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
 -    return objectStore.updateTableColumnStatistics(statsObj);
++    return objectStore.updateTableColumnStatistics(statsObj, txnId, validWriteIds, writeId);
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
 -      List<String> partVals)
++      List<String> partVals, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
 -    return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
++    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, txnId, validWriteIds, writeId);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return "";
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return new ArrayList<>();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException {
+     return -1;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+     throws NoSuchObjectException, MetaException {}
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return objectStore.getMetaStoreSchemaVersion();
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
+     objectStore.setMetaStoreSchemaVersion(schemaVersion, comment);
+ 
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionColumnStatistics(catName, dbName, tblName, colNames, partNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartitionColumnStatistics(
++        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartitions(catName, dbName, tblName, parts);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     objectStore.dropPartitions(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+     objectStore.createFunction(func);
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+     objectStore.alterFunction(catName, dbName, funcName, newFunction);
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+     objectStore.dropFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return objectStore.getFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+           throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return objectStore.getFunctions(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(String catName, String dbName,
++                                      String tblName, List<String> partNames,
++                                      List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return objectStore.getNextNotification(rqst);
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+     objectStore.addNotificationEvent(event);
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+     objectStore.cleanNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return objectStore.getCurrentNotificationEventId();
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return  objectStore.getNotificationEventsCount(rqst);
+   }
+ 
+   @Override
+   public void flushCache() {
+     objectStore.flushCache();
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return objectStore.getTableCount();
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return objectStore.getPartitionCount();
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return objectStore.getDatabaseCount();
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+    String constraintName, boolean missingOk) throws NoSuchObjectException {
+    // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.createResourcePlan(resourcePlan, copyFrom, defaultPoolSize);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException,
+       MetaException {
+     return objectStore.getResourcePlan(name);
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return objectStore.getAllResourcePlans();
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException {
+     return objectStore.alterResourcePlan(
+       name, resourcePlan, canActivateDisabled, canDeactivate, isReplace);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return objectStore.getActiveResourcePlan();
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return objectStore.validateResourcePlan(name);
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+     objectStore.dropResourcePlan(name);
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, MetaException, NoSuchObjectException,
+           InvalidOperationException {
+     objectStore.createWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTrigger(resourcePlanName, triggerName);
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.getTriggersForResourcePlan(resourcePlanName);
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createPool(pool);
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterPool(pool, poolPath);
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMPool(resourcePlanName, poolPath);
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+     objectStore.createOrUpdateWMMapping(mapping, update);
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMMapping(mapping);
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+ 
+   @Override
+   public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException {
+     objectStore.createISchema(schema);
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+     objectStore.alterISchema(schemaName, newSchema);
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return objectStore.getISchema(schemaName);
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+     objectStore.dropISchema(schemaName);
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+     objectStore.addSchemaVersion(schemaVersion);
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+     objectStore.alterSchemaVersion(version, newVersion);
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return objectStore.getSchemaVersion(version);
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getLatestSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getAllSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return objectStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+     objectStore.dropSchemaVersion(version);
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+     return objectStore.getSerDeInfo(serDeName);
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+     objectStore.addSerde(serde);
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+     objectStore.addRuntimeStat(stat);
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return objectStore.getRuntimeStats(maxEntries, maxCreateTime);
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return objectStore.deleteRuntimeStats(maxRetainSecs);
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+     objectStore.cleanWriteNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return objectStore.getAllWriteEventInfo(txnId, dbName, tableName);
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ }
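
The class ending above wraps an ObjectStore and forwards most store calls straight to it, while a handful of methods are deliberately stubbed (for example, getAllFunctions returns an empty list and several stats/constraint methods return null). A minimal, self-contained sketch of that forwarding/stub pattern follows; the SimpleStore, RealStore and ForwardingTestStore names are hypothetical illustrations, not Hive's actual RawStore API.

// Sketch of a delegating test store: forward most calls, stub the rest.
// All names below are invented for illustration only.

import java.util.Collections;
import java.util.List;

interface SimpleStore {
  void createFunction(String name);
  List<String> getAllFunctions();
}

class RealStore implements SimpleStore {
  @Override
  public void createFunction(String name) {
    System.out.println("created " + name);
  }

  @Override
  public List<String> getAllFunctions() {
    return Collections.singletonList("real_fn");
  }
}

class ForwardingTestStore implements SimpleStore {
  private final SimpleStore delegate;

  ForwardingTestStore(SimpleStore delegate) {
    this.delegate = delegate;
  }

  @Override
  public void createFunction(String name) {
    // Forwarded: behaves exactly like the wrapped store.
    delegate.createFunction(name);
  }

  @Override
  public List<String> getAllFunctions() {
    // Stubbed: the wrapper forces a fixed result regardless of the delegate,
    // mirroring how the adapter above returns Collections.emptyList().
    return Collections.emptyList();
  }
}

public class ForwardingTestStoreDemo {
  public static void main(String[] args) {
    SimpleStore store = new ForwardingTestStore(new RealStore());
    store.createFunction("fn1");                 // prints "created fn1"
    System.out.println(store.getAllFunctions()); // prints "[]"
  }
}

The value of such a wrapper in tests is that individual methods can be controlled or short-circuited without reimplementing the whole interface.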


[10/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 0000000,5d1a525..caa55d7
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@@ -1,0 -1,1866 +1,1868 @@@
+ --
+ -- PostgreSQL database dump
+ --
+ 
+ SET statement_timeout = 0;
+ SET client_encoding = 'UTF8';
+ SET standard_conforming_strings = off;
+ SET check_function_bodies = false;
+ SET client_min_messages = warning;
+ SET escape_string_warning = off;
+ 
+ SET search_path = public, pg_catalog;
+ 
+ SET default_tablespace = '';
+ 
+ SET default_with_oids = false;
+ 
+ --
+ -- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "BUCKETING_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "CDS" (
+     "CD_ID" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "COLUMNS_V2" (
+     "CD_ID" bigint NOT NULL,
+     "COMMENT" character varying(4000),
+     "COLUMN_NAME" character varying(767) NOT NULL,
+     "TYPE_NAME" text,
+     "INTEGER_IDX" integer NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DATABASE_PARAMS" (
+     "DB_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(180) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ CREATE TABLE "CTLGS" (
+     "CTLG_ID" BIGINT PRIMARY KEY,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL
+ );
+ 
+ --
+ -- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DBS" (
+     "DB_ID" bigint NOT NULL,
+     "DESC" character varying(4000) DEFAULT NULL::character varying,
+     "DB_LOCATION_URI" character varying(4000) NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "CTLG_NAME" varchar(256)
+ );
+ 
+ 
+ --
+ -- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DB_PRIVS" (
+     "DB_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "GLOBAL_PRIVS" (
+     "USER_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "IDXS" (
+     "INDEX_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DEFERRED_REBUILD" boolean NOT NULL,
+     "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+     "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+     "INDEX_TBL_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "ORIG_TBL_ID" bigint,
+     "SD_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "INDEX_PARAMS" (
+     "INDEX_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "NUCLEUS_TABLES" (
+     "CLASS_NAME" character varying(128) NOT NULL,
+     "TABLE_NAME" character varying(128) NOT NULL,
+     "TYPE" character varying(4) NOT NULL,
+     "OWNER" character varying(2) NOT NULL,
+     "VERSION" character varying(20) NOT NULL,
+     "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITIONS" (
+     "PART_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+     "SD_ID" bigint,
 -    "TBL_ID" bigint
++    "TBL_ID" bigint,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_EVENTS" (
+     "PART_NAME_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256),
+     "DB_NAME" character varying(128),
+     "EVENT_TIME" bigint NOT NULL,
+     "EVENT_TYPE" integer NOT NULL,
+     "PARTITION_NAME" character varying(767),
+     "TBL_NAME" character varying(256)
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEYS" (
+     "TBL_ID" bigint NOT NULL,
+     "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+     "PKEY_NAME" character varying(128) NOT NULL,
+     "PKEY_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEY_VALS" (
+     "PART_ID" bigint NOT NULL,
+     "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_PARAMS" (
+     "PART_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_PRIVS" (
+     "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_PRIVS" (
+     "PART_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLES" (
+     "ROLE_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLE_MAP" (
+     "ROLE_GRANT_ID" bigint NOT NULL,
+     "ADD_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SDS" (
+     "SD_ID" bigint NOT NULL,
+     "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "IS_COMPRESSED" boolean NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+     "NUM_BUCKETS" bigint NOT NULL,
+     "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "SERDE_ID" bigint,
+     "CD_ID" bigint,
+     "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SD_PARAMS" (
+     "SD_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SEQUENCE_TABLE" (
+     "SEQUENCE_NAME" character varying(255) NOT NULL,
+     "NEXT_VAL" bigint NOT NULL
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ --
+ -- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDES" (
+     "SERDE_ID" bigint NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "SLIB" character varying(4000) DEFAULT NULL::character varying,
+     "DESCRIPTION" varchar(4000),
+     "SERIALIZER_CLASS" varchar(4000),
+     "DESERIALIZER_CLASS" varchar(4000),
+     "SERDE_TYPE" integer
+ );
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDE_PARAMS" (
+     "SERDE_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SORT_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "ORDER" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TABLE_PARAMS" (
+     "TBL_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBLS" (
+     "TBL_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "OWNER" character varying(767) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "RETENTION" bigint NOT NULL,
+     "SD_ID" bigint,
+     "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "VIEW_EXPANDED_TEXT" text,
+     "VIEW_ORIGINAL_TEXT" text,
 -    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
++    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ --
+ -- Name: MV_CREATION_METADATA; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_CREATION_METADATA" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256) NOT NULL,
+     "DB_NAME" character varying(128) NOT NULL,
+     "TBL_NAME" character varying(256) NOT NULL,
+     "TXN_LIST" text,
+     "MATERIALIZATION_TIME" bigint NOT NULL
+ );
+ 
+ --
+ -- Name: MV_TABLES_USED; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_TABLES_USED" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "TBL_ID" bigint NOT NULL
+ );
+ 
+ --
+ -- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_COL_PRIVS" (
+     "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_PRIVS" (
+     "TBL_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPES" (
+     "TYPES_ID" bigint NOT NULL,
+     "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+     "TYPE1" character varying(767) DEFAULT NULL::character varying,
+     "TYPE2" character varying(767) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPE_FIELDS" (
+     "TYPE_NAME" bigint NOT NULL,
+     "COMMENT" character varying(256) DEFAULT NULL::character varying,
+     "FIELD_NAME" character varying(128) NOT NULL,
+     "FIELD_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST" (
+     "STRING_LIST_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+     "STRING_LIST_ID" bigint NOT NULL,
+     "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_NAMES" (
+     "SD_ID" bigint NOT NULL,
+     "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+     "SD_ID" bigint NOT NULL,
+     "STRING_LIST_ID_KID" bigint NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ CREATE TABLE "SKEWED_VALUES" (
+     "SD_ID_OID" bigint NOT NULL,
+     "STRING_LIST_ID_EID" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE  "MASTER_KEYS"
+ (
+     "KEY_ID" SERIAL,
+     "MASTER_KEY" varchar(767) NULL,
+     PRIMARY KEY ("KEY_ID")
+ );
+ 
+ CREATE TABLE  "DELEGATION_TOKENS"
+ (
+     "TOKEN_IDENT" varchar(767) NOT NULL,
+     "TOKEN" varchar(767) NULL,
+     PRIMARY KEY ("TOKEN_IDENT")
+ );
+ 
+ CREATE TABLE "TAB_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "TBL_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE "VERSION" (
+   "VER_ID" bigint,
+   "SCHEMA_VERSION" character varying(127) NOT NULL,
+   "VERSION_COMMENT" character varying(255) NOT NULL
+ );
+ 
+ --
+ -- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "PART_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for FUNCS
+ --
+ CREATE TABLE "FUNCS" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "CLASS_NAME" VARCHAR(4000),
+   "CREATE_TIME" INTEGER NOT NULL,
+   "DB_ID" BIGINT,
+   "FUNC_NAME" VARCHAR(128),
+   "FUNC_TYPE" INTEGER NOT NULL,
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   PRIMARY KEY ("FUNC_ID")
+ );
+ 
+ --
+ -- Table structure for FUNC_RU
+ --
+ CREATE TABLE "FUNC_RU" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "RESOURCE_TYPE" INTEGER NOT NULL,
+   "RESOURCE_URI" VARCHAR(4000),
+   "INTEGER_IDX" INTEGER NOT NULL,
+   PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_LOG"
+ (
+     "NL_ID" BIGINT NOT NULL,
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE" text,
+     "MESSAGE_FORMAT" VARCHAR(16),
+     PRIMARY KEY ("NL_ID")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_SEQUENCE"
+ (
+     "NNI_ID" BIGINT NOT NULL,
+     "NEXT_EVENT_ID" BIGINT NOT NULL,
+     PRIMARY KEY ("NNI_ID")
+ );
+ 
+ INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE");
+ 
+ CREATE TABLE "KEY_CONSTRAINTS"
+ (
+   "CHILD_CD_ID" BIGINT,
+   "CHILD_INTEGER_IDX" BIGINT,
+   "CHILD_TBL_ID" BIGINT,
+   "PARENT_CD_ID" BIGINT,
+   "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+   "PARENT_TBL_ID" BIGINT NOT NULL,
+   "POSITION" BIGINT NOT NULL,
+   "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+   "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+   "UPDATE_RULE" SMALLINT,
+   "DELETE_RULE"	SMALLINT,
+   "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+   "DEFAULT_VALUE" VARCHAR(400),
+   PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+ ) ;
+ 
+ ---
+ --- Table structure for METASTORE_DB_PROPERTIES
+ ---
+ CREATE TABLE "METASTORE_DB_PROPERTIES"
+ (
+   "PROPERTY_KEY" VARCHAR(255) NOT NULL,
+   "PROPERTY_VALUE" VARCHAR(1000) NOT NULL,
+   "DESCRIPTION" VARCHAR(1000)
+ );
+ 
+ 
+ CREATE TABLE "WM_RESOURCEPLAN" (
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "QUERY_PARALLELISM" integer,
+     "STATUS" character varying(20) NOT NULL,
+     "DEFAULT_POOL_ID" bigint
+ );
+ 
+ CREATE TABLE "WM_POOL" (
+     "POOL_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "PATH" character varying(1024) NOT NULL,
+     "ALLOC_FRACTION" double precision,
+     "QUERY_PARALLELISM" integer,
+     "SCHEDULING_POLICY" character varying(1024)
+ );
+ 
+ CREATE TABLE "WM_TRIGGER" (
+     "TRIGGER_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "IS_IN_UNMANAGED" smallint NOT NULL DEFAULT 0
+ );
+ 
+ CREATE TABLE "WM_POOL_TO_TRIGGER" (
+     "POOL_ID" bigint NOT NULL,
+     "TRIGGER_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "WM_MAPPING" (
+     "MAPPING_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "ENTITY_TYPE" character varying(128) NOT NULL,
+     "ENTITY_NAME" character varying(128) NOT NULL,
+     "POOL_ID" bigint,
+     "ORDERING" integer
+ );
+ 
+ --
+ -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "CDS"
+     ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ 
+ --
+ -- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ 
+ --
+ -- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "NUCLEUS_TABLES"
+     ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_EVENTS"
+     ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ 
+ --
+ -- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+ 
+ 
+ --
+ -- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ 
+ --
+ -- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SEQUENCE_TABLE"
+     ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ 
+ --
+ -- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDES"
+     ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ --
+ -- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+ 
+ 
+ --
+ -- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME");
+ 
+ 
+ --
+ -- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+ 
+ 
+ --
+ -- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ ALTER TABLE ONLY "METASTORE_DB_PROPERTIES"
+     ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ 
+ -- Resource plan: Primary key and unique key constraints.
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
+ --
+ -- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+ 
+ --
+ -- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME");
+ 
+ --
+ -- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+ 
+ --
+ -- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+ 
+ --
+ -- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+ 
+ --
+ -- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ --
+ -- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+ 
+ --
+ -- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+ 
+ --
+ -- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME");
+ 
+ ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+ 
+ -- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNCS"
+     ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+ 
+ -- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNC_RU"
+     ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+ 
+ -- Resource plan FK constraints.
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_CREATION_METADATA"
+     ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ CREATE INDEX "MV_UNIQUE_TABLE"
+     ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME");
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
+ 
+ --
+ -- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+ --
+ 
+ REVOKE ALL ON SCHEMA public FROM PUBLIC;
+ GRANT ALL ON SCHEMA public TO PUBLIC;
+ 
+ --
+ -- PostgreSQL database dump complete
+ --
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767) DEFAULT NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767) DEFAULT NULL,
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO bytea,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO bytea,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME varchar(128) NOT NULL,
+   MRL_TBL_NAME varchar(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" boolean not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"), 
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" text,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), 
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+  RS_ID bigint primary key,
+  CREATE_TIME bigint NOT NULL,
+  WEIGHT bigint NOT NULL,
+  PAYLOAD bytea
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" (
+   "WNL_ID" bigint NOT NULL,
+   "WNL_TXNID" bigint NOT NULL,
+   "WNL_WRITEID" bigint NOT NULL,
+   "WNL_DATABASE" varchar(128) NOT NULL,
+   "WNL_TABLE" varchar(128) NOT NULL,
+   "WNL_PARTITION" varchar(1024) NOT NULL,
+   "WNL_TABLE_OBJ" text NOT NULL,
+   "WNL_PARTITION_OBJ" text,
+   "WNL_FILES" text,
+   "WNL_EVENT_TIME" integer NOT NULL,
+   PRIMARY KEY ("WNL_TXNID", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION")
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 0000000,40d2795..eff08b3
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
++-- HIVE-19416
++ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
++ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
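
The only schema change in this upgrade is the new WRITE_ID bigint column on TBLS and PARTITIONS (HIVE-19416); the rest is the usual version bookkeeping. A couple of sanity checks that could be run after applying the script against a stock PostgreSQL install (illustrative queries, not part of the upgrade itself):

SELECT "SCHEMA_VERSION", "VERSION_COMMENT" FROM "VERSION" WHERE "VER_ID" = 1;

SELECT table_name, column_name, data_type
  FROM information_schema.columns
 WHERE table_name IN ('TBLS', 'PARTITIONS') AND column_name = 'WRITE_ID';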


[28/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 0000000,a7ca05a..35be3c4
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@@ -1,0 -1,335 +1,336 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.common;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.TreeMap;
+ 
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.fasterxml.jackson.annotation.JsonInclude;
+ import com.fasterxml.jackson.annotation.JsonProperty;
+ import com.fasterxml.jackson.core.JsonGenerator;
+ import com.fasterxml.jackson.core.JsonParser;
+ import com.fasterxml.jackson.core.JsonProcessingException;
+ import com.fasterxml.jackson.databind.DeserializationContext;
+ import com.fasterxml.jackson.databind.JsonDeserializer;
+ import com.fasterxml.jackson.databind.JsonSerializer;
+ import com.fasterxml.jackson.databind.ObjectMapper;
+ import com.fasterxml.jackson.databind.ObjectReader;
+ import com.fasterxml.jackson.databind.ObjectWriter;
+ import com.fasterxml.jackson.databind.SerializerProvider;
+ import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+ import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+ 
+ 
+ /**
+  * A class that defines the constant strings used by the statistics implementation.
+  */
+ 
+ public class StatsSetupConst {
+ 
+   protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName());
+ 
+   public enum StatDB {
+     fs {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsPublisher";
+       }
+ 
+       @Override
+       public String getAggregator(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsAggregator";
+       }
+     },
+     custom {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_PUBLISHER); }
+       @Override
+       public String getAggregator(Configuration conf) {
+         return MetastoreConf.getVar(conf,  ConfVars.STATS_DEFAULT_AGGREGATOR); }
+     };
+     public abstract String getPublisher(Configuration conf);
+     public abstract String getAggregator(Configuration conf);
+   }
+ 
+   // statistics stored in metastore
+   /**
+    * The name of the statistic Num Files to be published or gathered.
+    */
+   public static final String NUM_FILES = "numFiles";
+ 
+   /**
+    * The name of the statistic Num Partitions to be published or gathered.
+    */
+   public static final String NUM_PARTITIONS = "numPartitions";
+ 
+   /**
+    * The name of the statistic Total Size to be published or gathered.
+    */
+   public static final String TOTAL_SIZE = "totalSize";
+ 
+   /**
+    * The name of the statistic Row Count to be published or gathered.
+    */
+   public static final String ROW_COUNT = "numRows";
+ 
+   public static final String RUN_TIME_ROW_COUNT = "runTimeNumRows";
+ 
+   /**
+    * The name of the statistic Raw Data Size to be published or gathered.
+    */
+   public static final String RAW_DATA_SIZE = "rawDataSize";
+ 
+   /**
+    * The name of the statistic for Number of Erasure Coded Files - to be published or gathered.
+    */
+   public static final String NUM_ERASURE_CODED_FILES = "numFilesErasureCoded";
+ 
+   /**
+    * Temp dir for writing stats from tasks.
+    */
+   public static final String STATS_TMP_LOC = "hive.stats.tmp.loc";
+ 
+   public static final String STATS_FILE_PREFIX = "tmpstats-";
+   /**
+    * List of all supported statistics
+    */
+   public static final List<String> SUPPORTED_STATS = ImmutableList.of(
+       NUM_FILES, ROW_COUNT, TOTAL_SIZE, RAW_DATA_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   /**
+    * List of all statistics that need to be collected during query execution. These are
+    * statistics that inherently require a scan of the data.
+    */
+   public static final List<String> STATS_REQUIRE_COMPUTE = ImmutableList.of(ROW_COUNT, RAW_DATA_SIZE);
+ 
+   /**
+    * List of statistics that can be collected quickly without requiring a scan of the data.
+    */
+   public static final List<String> FAST_STATS = ImmutableList.of(
+       NUM_FILES, TOTAL_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   // This string constant is used to indicate to AlterHandler that
+   // alterPartition/alterTable is happening via statsTask or via user.
+   public static final String STATS_GENERATED = "STATS_GENERATED";
+ 
+   public static final String TASK = "TASK";
+ 
+   public static final String USER = "USER";
+ 
+   // This string constant is used by AlterHandler to figure out that it should not attempt to
+   // update stats. It is set by any client-side task which wishes to signal that no stats
+   // update should take place, such as with replication.
+   public static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";
+ 
+   // This string constant is persisted in the metastore to indicate whether the corresponding
+   // table or partition's statistics, and its column statistics, are accurate or not.
+   public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
+ 
+   public static final String COLUMN_STATS = "COLUMN_STATS";
+ 
+   public static final String BASIC_STATS = "BASIC_STATS";
+ 
+   public static final String CASCADE = "CASCADE";
+ 
+   public static final String TRUE = "true";
+ 
+   public static final String FALSE = "false";
+ 
+   // The parameter keys for the table statistics. Those keys are excluded from 'show create table' command output.
+   public static final List<String> TABLE_PARAMS_STATS_KEYS = ImmutableList.of(
+       COLUMN_STATS_ACCURATE, NUM_FILES, TOTAL_SIZE, ROW_COUNT, RAW_DATA_SIZE, NUM_PARTITIONS,
+       NUM_ERASURE_CODED_FILES);
+ 
+   private static class ColumnStatsAccurate {
+     private static ObjectReader objectReader;
+     private static ObjectWriter objectWriter;
+ 
+     static {
+       ObjectMapper objectMapper = new ObjectMapper();
+       objectReader = objectMapper.readerFor(ColumnStatsAccurate.class);
+       objectWriter = objectMapper.writerFor(ColumnStatsAccurate.class);
+     }
+ 
+     static class BooleanSerializer extends JsonSerializer<Boolean> {
+ 
+       @Override
+       public void serialize(Boolean value, JsonGenerator jsonGenerator,
+           SerializerProvider serializerProvider) throws IOException {
+         jsonGenerator.writeString(value.toString());
+       }
+     }
+ 
+     static class BooleanDeserializer extends JsonDeserializer<Boolean> {
+ 
+       public Boolean deserialize(JsonParser jsonParser,
+           DeserializationContext deserializationContext)
+               throws IOException {
+         return Boolean.valueOf(jsonParser.getValueAsString());
+       }
+     }
+ 
+     @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+     @JsonSerialize(using = BooleanSerializer.class)
+     @JsonDeserialize(using = BooleanDeserializer.class)
+     @JsonProperty(BASIC_STATS)
+     boolean basicStats;
+ 
+     @JsonInclude(JsonInclude.Include.NON_EMPTY)
+     @JsonProperty(COLUMN_STATS)
+     @JsonSerialize(contentUsing = BooleanSerializer.class)
+     @JsonDeserialize(contentUsing = BooleanDeserializer.class)
+     TreeMap<String, Boolean> columnStats = new TreeMap<>();
+ 
+   }
+ 
+   public static boolean areBasicStatsUptoDate(Map<String, String> params) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.basicStats;
+   }
+ 
+   public static boolean areColumnStatsUptoDate(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
+ 
+   // It will only throw JSONException when stats.put(BASIC_STATS, TRUE)
+   // has a duplicate key, which is not possible.
+   // Note that setting basic stats to false will wipe out column stats too.
+   public static void setBasicStatsState(Map<String, String> params, String setting) {
+     if (setting.equals(FALSE)) {
+       if (params!=null && params.containsKey(COLUMN_STATS_ACCURATE)) {
+         params.remove(COLUMN_STATS_ACCURATE);
+       }
+       return;
+     }
+     if (params == null) {
+       throw new RuntimeException("params are null...cant set columnstatstate!");
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.basicStats = true;
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       throw new RuntimeException("can't serialize column stats", e);
+     }
+   }
+ 
+   public static void setColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       throw new RuntimeException("params are null...cant set columnstatstate!");
+     }
+     if (colNames == null) {
+       return;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+ 
+     for (String colName : colNames) {
+       if (!stats.columnStats.containsKey(colName)) {
+         stats.columnStats.put(colName, true);
+       }
+     }
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static boolean canColumnStatsMerge(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
++    // TODO: should this also check that the basic flag is valid?
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
 -  
++
+   public static void clearColumnStatsState(Map<String, String> params) {
+     if (params == null) {
+       return;
+     }
+ 
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.columnStats.clear();
+ 
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void removeColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       return;
+     }
+     try {
+       ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+       for (String string : colNames) {
+         stats.columnStats.remove(string);
+       }
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void setStatsStateForCreateTable(Map<String, String> params,
+       List<String> cols, String setting) {
+     if (TRUE.equals(setting)) {
+       for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+         params.put(stat, "0");
+       }
+     }
+     setBasicStatsState(params, setting);
+     if (TRUE.equals(setting)) {
+       setColumnStatsState(params, cols);
+     }
+   }
 -  
++
+   private static ColumnStatsAccurate parseStatsAcc(String statsAcc) {
+     if (statsAcc == null) {
+       return new ColumnStatsAccurate();
+     }
+     try {
+       return ColumnStatsAccurate.objectReader.readValue(statsAcc);
+     } catch (Exception e) {
+       ColumnStatsAccurate ret = new ColumnStatsAccurate();
+       if (TRUE.equalsIgnoreCase(statsAcc)) {
+         ret.basicStats = true;
+       }
+       return ret;
+     }
+   }
+ }
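
StatsSetupConst serializes the ColumnStatsAccurate holder to JSON and stores it under the COLUMN_STATS_ACCURATE key of the table (or partition) parameters, which in the PostgreSQL schema earlier in this patch live in TABLE_PARAMS and PARTITION_PARAMS. As an illustration (the column names id and name are invented for the example), a table whose basic stats and two column stats are up to date would carry a value along the lines of {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true","name":"true"}}, which can be inspected directly:

-- Illustrative query against the PostgreSQL metastore schema defined above.
SELECT t."TBL_NAME", p."PARAM_VALUE"
  FROM "TABLE_PARAMS" p
  JOIN "TBLS" t ON t."TBL_ID" = p."TBL_ID"
 WHERE p."PARAM_KEY" = 'COLUMN_STATS_ACCURATE';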

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 0000000,050dca9..f3dc264
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@@ -1,0 -1,202 +1,204 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ /**
+  * Interface for Alter Table and Alter Partition code
+  */
+ public interface AlterHandler extends Configurable {
+ 
+   /**
+    * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
+    * String, String, Table, EnvironmentContext, IHMSHandler)}
+    *
+    * handles alter table, the changes could be cascaded to partitions if applicable
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName
+    *          catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered. same as
+    *          <i>newTable.tableName</i> if alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   @Deprecated
+   default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+     String name, Table newTable, EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException {
 -    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null);
++    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, -1, null);
+   }
+ 
+   /**
+    * handles alter table, the changes could be cascaded to partitions if applicable
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered. same as
+    *          <i>newTable.tableName</i> if alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newTable, EnvironmentContext envContext,
 -      IHMSHandler handler) throws InvalidOperationException, MetaException;
++      IHMSHandler handler, long txnId, String writeIdList)
++          throws InvalidOperationException, MetaException;
+ 
+   /**
+    * @deprecated As of release 2.2.0.  Replaced by {@link #alterPartition(RawStore, Warehouse, String,
+    * String, List, Partition, EnvironmentContext, IHMSHandler)}
+    *
+    * handles alter partition
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * handles alter partition
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh physical warehouse class
+    * @param catName catalog name
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
+                            final String dbname, final String name, final List<String> part_vals,
+                            final Partition new_part, EnvironmentContext environmentContext,
 -                           IHMSHandler handler)
++                           IHMSHandler handler, long txnId, String validWriteIds)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * @deprecated As of release 3.0.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+    * String, String, List, EnvironmentContext, IHMSHandler)}
+    *
+    * handles alter partitions
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+     final String dbname, final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * handles alter partitions
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @param handler
+    *          HMSHandle object (required to log event notification)
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+     final String dbname, final String name, final List<Partition> new_parts,
 -    EnvironmentContext environmentContext,IHMSHandler handler)
++    EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId,
++    IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ }
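
The interface change above threads transaction context (a txnId plus the caller's valid write-ID list) through alterTable and alterPartition(s); the deprecated defaults pass -1 and null to preserve the old, non-transactional behaviour. A minimal sketch of a caller of the new signature, assuming conf, msdb, wh, newTable, envContext and handler are already provided by the surrounding metastore code (illustration only, not code from this patch):

    // Sketch only: conf, msdb, wh, newTable, envContext and handler are assumed
    // to exist; -1/null means "no transactional context", matching the
    // deprecated default method.
    AlterHandler alterHandler = new HiveAlterHandler();
    alterHandler.setConf(conf);
    alterHandler.alterTable(msdb, wh, Warehouse.DEFAULT_CATALOG_NAME,
        "default", "t1", newTable, envContext, handler,
        -1L, null /* validWriteIdList */);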

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 0000000,93ac74c..719f001
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@@ -1,0 -1,948 +1,961 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ 
+ import java.io.IOException;
+ import java.net.URI;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ /**
+  * Hive specific implementation of alter
+  */
+ public class HiveAlterHandler implements AlterHandler {
+ 
+   protected Configuration conf;
+   private static final Logger LOG = LoggerFactory.getLogger(HiveAlterHandler.class
+       .getName());
+ 
+   // hiveConf, getConf and setConf are in this class because AlterHandler extends Configurable.
+   // Always use the configuration from HMS Handler.  Making AlterHandler not extend Configurable
+   // is not in the scope of the fix for HIVE-17942.
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+   }
+ 
+   @Override
+   public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newt, EnvironmentContext environmentContext,
 -      IHMSHandler handler) throws InvalidOperationException, MetaException {
++      IHMSHandler handler, long txnId, String writeIdList)
++          throws InvalidOperationException, MetaException {
+     catName = normalizeIdentifier(catName);
+     name = name.toLowerCase();
+     dbname = dbname.toLowerCase();
+ 
+     final boolean cascade = environmentContext != null
+         && environmentContext.isSetProperties()
+         && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+             StatsSetupConst.CASCADE));
+     if (newt == null) {
+       throw new InvalidOperationException("New table is null");
+     }
+ 
+     String newTblName = newt.getTableName().toLowerCase();
+     String newDbName = newt.getDbName().toLowerCase();
+ 
+     if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
+       throw new InvalidOperationException(newTblName + " is not a valid object name");
+     }
+     String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
+     if (validate != null) {
+       throw new InvalidOperationException("Invalid column " + validate);
+     }
+ 
+     Path srcPath = null;
+     FileSystem srcFs;
+     Path destPath = null;
+     FileSystem destFs = null;
+ 
+     boolean success = false;
+     boolean dataWasMoved = false;
+     boolean isPartitionedTable = false;
+ 
+     Table oldt = null;
+ 
+     List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
+     List<MetaStoreEventListener> listeners = handler.getListeners();
+     Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
+ 
+     try {
+       boolean rename = false;
+       List<Partition> parts;
+ 
+       // Switching tables between catalogs is not allowed.
+       if (!catName.equalsIgnoreCase(newt.getCatName())) {
+         throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog" +
+             catName + ", new catalog " + newt.getCatName());
+       }
+ 
+       // check if table with the new name already exists
+       if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
 -        if (msdb.getTable(catName, newDbName, newTblName) != null) {
++        if (msdb.getTable(catName, newDbName, newTblName,  -1, null) != null) {
+           throw new InvalidOperationException("new table " + newDbName
+               + "." + newTblName + " already exists");
+         }
+         rename = true;
+       }
+ 
+       msdb.openTransaction();
+       // get old table
 -      oldt = msdb.getTable(catName, dbname, name);
++      // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
++      oldt = msdb.getTable(catName, dbname, name, -1, null);
+       if (oldt == null) {
+         throw new InvalidOperationException("table " +
+             TableName.getQualified(catName, dbname, name) + " doesn't exist");
+       }
+ 
+       if (oldt.getPartitionKeysSize() != 0) {
+         isPartitionedTable = true;
+       }
+ 
+       // Views derive the column type from the base table definition.  So the view definition
+       // can be altered to change the column types.  The column type compatibility checks should
+       // be done only for non-views.
+       if (MetastoreConf.getBoolVar(handler.getConf(),
+             MetastoreConf.ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES) &&
+           !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
+         // Throws InvalidOperationException if the new column types are not
+         // compatible with the current column types.
+         checkColTypeChangeCompatible(oldt.getSd().getCols(), newt.getSd().getCols());
+       }
+ 
+       //check that partition keys have not changed, except for virtual views
+       //however, allow the partition comments to change
+       boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
+           newt.getPartitionKeys());
+ 
+       if(!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())){
+         if (!partKeysPartiallyEqual) {
+           throw new InvalidOperationException("partition keys can not be changed.");
+         }
+       }
+ 
+       // rename needs change the data location and move the data to the new location corresponding
+       // to the new name if:
+       // 1) the table is not a virtual view, and
+       // 2) the table is not an external table, and
+       // 3) the user didn't change the default location (or new location is empty), and
+       // 4) the table was not initially created with a specified location
+       if (rename
+           && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())
+           && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
+             || StringUtils.isEmpty(newt.getSd().getLocation()))
+           && !MetaStoreUtils.isExternalTable(oldt)) {
+         Database olddb = msdb.getDatabase(catName, dbname);
+         // if a table was created in a user specified location using the DDL like
+         // create table tbl ... location ...., it should be treated like an external table
+         // in the table rename, its data location should not be changed. We can check
+         // if the table directory was created directly under its database directory to tell
+         // if it is such a table
+         srcPath = new Path(oldt.getSd().getLocation());
+         String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri())
+             .relativize(srcPath.toUri()).toString();
+         boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)
+             && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
+ 
+         if (!tableInSpecifiedLoc) {
+           srcFs = wh.getFs(srcPath);
+ 
+           // get new location
+           Database db = msdb.getDatabase(catName, newDbName);
+           Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
+           destPath = new Path(databasePath, newTblName);
+           destFs = wh.getFs(destPath);
+ 
+           newt.getSd().setLocation(destPath.toString());
+ 
+           // check that destination does not exist otherwise we will be
+           // overwriting data
+           // check that src and dest are on the same file system
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("table new location " + destPath
+                 + " is on a different file system than the old location "
+                 + srcPath + ". This operation is not supported");
+           }
+ 
+           try {
+             if (destFs.exists(destPath)) {
+               throw new InvalidOperationException("New location for this table " +
+                   TableName.getQualified(catName, newDbName, newTblName) +
+                       " already exists : " + destPath);
+             }
+             // check that src exists and also checks permissions necessary, rename src to dest
+             if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath,
+                     ReplChangeManager.isSourceOfReplication(olddb))) {
+               dataWasMoved = true;
+             }
+           } catch (IOException | MetaException e) {
+             LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
+             throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
+                 " failed to move data due to: '" + getSimpleMessage(e)
+                 + "' See hive log file for details.");
+           }
+ 
+           if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
+             LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) +
+                     "to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed ");
+             throw new MetaException("Alter table not allowed for table " +
+                     TableName.getQualified(catName, dbname, name) +
+                     "to new table = " + TableName.getQualified(catName, newDbName, newTblName));
+           }
+         }
+ 
+         if (isPartitionedTable) {
+           String oldTblLocPath = srcPath.toUri().getPath();
+           String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
+ 
+           // also the location field in partition
+           parts = msdb.getPartitions(catName, dbname, name, -1);
+           Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<>();
+           for (Partition part : parts) {
+             String oldPartLoc = part.getSd().getLocation();
+             if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
+               URI oldUri = new Path(oldPartLoc).toUri();
+               String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
+               Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
+               part.getSd().setLocation(newPartLocPath.toString());
+             }
+             part.setDbName(newDbName);
+             part.setTableName(newTblName);
+             ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                 part.getValues(), part.getSd().getCols(), oldt, part, null);
+             if (colStats != null) {
+               columnStatsNeedUpdated.put(part, colStats);
+             }
+           }
 -          msdb.alterTable(catName, dbname, name, newt);
++          // Do not verify stats parameters on a partitioned table.
++          msdb.alterTable(catName, dbname, name, newt, -1, null);
+           // alterPartition is only for changing the partition location in the table rename
+           if (dataWasMoved) {
+ 
+             int partsToProcess = parts.size();
+             int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(),
+                 MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+             int batchStart = 0;
+             while (partsToProcess > 0) {
+               int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size());
+               List<Partition> partBatch = parts.subList(batchStart, batchEnd);
+               int partBatchSize = partBatch.size();
+               partsToProcess -= partBatchSize;
+               batchStart += partBatchSize;
+               List<List<String>> partValues = new ArrayList<>(partBatchSize);
+               for (Partition part : partBatch) {
+                 partValues.add(part.getValues());
+               }
 -              msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
++              msdb.alterPartitions(catName, newDbName, newTblName, partValues,
++                  partBatch, newt.getWriteId(), txnId, writeIdList);
+             }
+           }
+ 
+           for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
+             ColumnStatistics newPartColStats = partColStats.getValue();
+             newPartColStats.getStatsDesc().setDbName(newDbName);
+             newPartColStats.getStatsDesc().setTableName(newTblName);
 -            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
++            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(),
++                txnId, writeIdList, newt.getWriteId());
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(
++              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+         }
+       } else {
+         // operations other than table rename
 -
+         if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) &&
+             !isPartitionedTable) {
+           Database db = msdb.getDatabase(catName, newDbName);
+           // Update table stats. For partitioned table, we update stats in alterPartition()
+           MetaStoreUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext);
+         }
+ 
+         if (isPartitionedTable) {
+           //Currently only column related changes can be cascaded in alter table
+           if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
+             parts = msdb.getPartitions(catName, dbname, name, -1);
+             for (Partition part : parts) {
+               Partition oldPart = new Partition(part);
+               List<FieldSchema> oldCols = part.getSd().getCols();
+               part.getSd().setCols(newt.getSd().getCols());
+               ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                   part.getValues(), oldCols, oldt, part, null);
+               assert(colStats == null);
+               if (cascade) {
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), part);
++                msdb.alterPartition(
++                    catName, dbname, name, part.getValues(), part, txnId, writeIdList);
+               } else {
+                 // update changed properties (stats)
+                 oldPart.setParameters(part.getParameters());
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart);
++                msdb.alterPartition(
++                    catName, dbname, name, part.getValues(), oldPart, txnId, writeIdList);
+               }
+             }
 -            msdb.alterTable(catName, dbname, name, newt);
++            // Don't validate table-level stats for a partitioned table.
++            msdb.alterTable(catName, dbname, name, newt, -1, null);
+           } else {
+             LOG.warn("Alter table not cascaded to partitions.");
 -            alterTableUpdateTableColumnStats(msdb, oldt, newt);
++            alterTableUpdateTableColumnStats(
++                msdb, oldt, newt, environmentContext, txnId, writeIdList);
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(
++              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventMessage.EventType.ALTER_TABLE,
+                   new AlterTableEvent(oldt, newt, false, true, handler),
+                   environmentContext);
+       }
+       // commit the changes
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException e) {
+       LOG.debug("Failed to get object from Metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table."
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (InvalidInputException e) {
+         LOG.debug("Accessing Metastore failed due to invalid input ", e);
+         throw new InvalidOperationException(
+             "Unable to change partition or table."
+                 + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (NoSuchObjectException e) {
+       LOG.debug("Object not found in metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } finally {
+       if (!success) {
+         LOG.error("Failed to alter table " + TableName.getQualified(catName, dbname, name));
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           try {
+             if (destFs.exists(destPath)) {
+               if (!destFs.rename(destPath, srcPath)) {
+                 LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                     + " in alter table failure. Manual restore is needed.");
+               }
+             }
+           } catch (IOException e) {
+             LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                 +  " in alter table failure. Manual restore is needed.");
+           }
+         }
+       }
+     }
+ 
+     if (!listeners.isEmpty()) {
+       // I don't think event notifications in case of failures are necessary, but other HMS operations
+       // make this call whether the event failed or succeeded. To make this behavior consistent,
+       // this call is made for failed events also.
+       MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE,
+           new AlterTableEvent(oldt, newt, false, success, handler),
+           environmentContext, txnAlterTableEventResponses, msdb);
+     }
+   }
+ 
+   /**
+    * Extracts a concise message from a MetaException that encapsulates the error message of a
+    * RemoteException from Hadoop RPC, which wraps the remote stack trace into e.getMessage()
+    * and makes logs/stack traces confusing.
+    * @param ex the exception to summarize
+    * @return the first line of the message for a multi-line MetaException, otherwise the full message
+    */
+   String getSimpleMessage(Exception ex) {
+     if(ex instanceof MetaException) {
+       String msg = ex.getMessage();
+       if(msg == null || !msg.contains("\n")) {
+         return msg;
+       }
+       return msg.substring(0, msg.indexOf('\n'));
+     }
+     return ex.getMessage();
+   }
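+ 
+   // Illustrative behaviour of getSimpleMessage (hypothetical message text): for a MetaException
+   // whose message is "Unable to alter table.\n\tat org.apache.hadoop...", only the first line
+   // "Unable to alter table." is returned; for any other exception the full message is returned.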
+ 
+   @Override
+   public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part,
 -        environmentContext, null);
++        environmentContext, null, -1, null);
+   }
+ 
+   @Override
 -  public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
 -                                  final String dbname, final String name,
 -                                  final List<String> part_vals, final Partition new_part,
 -                                  EnvironmentContext environmentContext, IHMSHandler handler)
++  public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, String dbname,
++      String name, List<String> part_vals, final Partition new_part,
++      EnvironmentContext environmentContext, IHMSHandler handler, long txnId, String validWriteIds)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     boolean success = false;
+     Partition oldPart;
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     // Set DDL time to now if not specified
+     if (new_part.getParameters() == null ||
+         new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+         Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+           .currentTimeMillis() / 1000));
+     }
+ 
+     //alter partition
+     if (part_vals == null || part_vals.size() == 0) {
+       try {
+         msdb.openTransaction();
+ 
 -        Table tbl = msdb.getTable(catName, dbname, name);
++        Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+         if (tbl == null) {
+           throw new InvalidObjectException(
+               "Unable to alter partition because table or database does not exist.");
+         }
+         oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues());
+         if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+           // if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
+             MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 new_part, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD. We do not need to update its column stats.
+         if (oldPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(),
+               oldPart.getSd().getCols(), tbl, new_part, null);
+         }
 -        msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part);
++        msdb.alterPartition(
++            catName, dbname, name, new_part.getValues(), new_part, txnId, validWriteIds);
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                                 environmentContext);
+ 
+ 
+         }
+         success = msdb.commitTransaction();
+       } catch (InvalidObjectException e) {
+         LOG.warn("Alter failed", e);
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } catch (NoSuchObjectException e) {
+         //old partition does not exist
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } finally {
+         if(!success) {
+           msdb.rollbackTransaction();
+         }
+       }
+       return oldPart;
+     }
+ 
+     //rename partition
+     String oldPartLoc;
+     String newPartLoc;
+     Path srcPath = null;
+     Path destPath = null;
+     FileSystem srcFs;
+     FileSystem destFs = null;
+     boolean dataWasMoved = false;
+     Database db;
+     try {
+       msdb.openTransaction();
 -      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
++      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name,  -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partition because table or database does not exist.");
+       }
+       try {
+         oldPart = msdb.getPartition(catName, dbname, name, part_vals);
+       } catch (NoSuchObjectException e) {
+         // this means there is no existing partition
+         throw new InvalidObjectException(
+             "Unable to rename partition because old partition does not exist");
+       }
+ 
+       Partition check_part;
+       try {
+         check_part = msdb.getPartition(catName, dbname, name, new_part.getValues());
+       } catch(NoSuchObjectException e) {
+         // this means there is no existing partition
+         check_part = null;
+       }
+ 
+       if (check_part != null) {
+         throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." +
+             new_part.getValues());
+       }
+ 
+       // when renaming a partition, we should update
+       // 1) partition SD Location
+       // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS
+       // 3) rename the partition directory if it is not an external table
+       if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
++        // TODO: refactor this into a separate method after master merge, this one is too big.
+         try {
+           db = msdb.getDatabase(catName, dbname);
+ 
+           // if tbl location is available use it
+           // else derive the tbl location from database location
+           destPath = wh.getPartitionPath(db, tbl, new_part.getValues());
+           destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
+         } catch (NoSuchObjectException e) {
+           LOG.debug("Didn't find object in metastore ", e);
+           throw new InvalidOperationException(
+             "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+         }
+ 
+         if (destPath != null) {
+           newPartLoc = destPath.toString();
+           oldPartLoc = oldPart.getSd().getLocation();
+           LOG.info("srcPath:" + oldPartLoc);
+           LOG.info("descPath:" + newPartLoc);
+           srcPath = new Path(oldPartLoc);
+           srcFs = wh.getFs(srcPath);
+           destFs = wh.getFs(destPath);
+           // check that src and dest are on the same file system
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("New table location " + destPath
+               + " is on a different file system than the old location "
+               + srcPath + ". This operation is not supported.");
+           }
+ 
+           try {
+             if (srcFs.exists(srcPath)) {
+               if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+                 throw new InvalidOperationException("New location for this table "
+                   + tbl.getDbName() + "." + tbl.getTableName()
+                   + " already exists : " + destPath);
+               }
+               //if destPath's parent path doesn't exist, we should mkdir it
+               Path destParentPath = destPath.getParent();
+               if (!wh.mkdirs(destParentPath)) {
+                   throw new MetaException("Unable to create path " + destParentPath);
+               }
+ 
+               //rename the data directory
+               wh.renameDir(srcPath, destPath, ReplChangeManager.isSourceOfReplication(db));
+               LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
+               dataWasMoved = true;
+             }
+           } catch (IOException e) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
+             throw new InvalidOperationException("Unable to access src or dest location for partition "
+                 + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
+           } catch (MetaException me) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
+             throw me;
+           }
+           new_part.getSd().setLocation(newPartLoc);
+         }
+       } else {
+         new_part.getSd().setLocation(oldPart.getSd().getLocation());
+       }
+ 
+       if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+         MetaStoreUtils.updatePartitionStatsFast(
+             new_part, tbl, wh, false, true, environmentContext, false);
+       }
+ 
+       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
+       ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
+           oldPart.getSd().getCols(), tbl, new_part, null);
 -      msdb.alterPartition(catName, dbname, name, part_vals, new_part);
++      msdb.alterPartition(catName, dbname, name, part_vals, new_part, txnId, validWriteIds);
+       if (cs != null) {
+         cs.getStatsDesc().setPartName(newPartName);
+         try {
 -          msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
++          msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
++              txnId, validWriteIds, new_part.getWriteId());
+         } catch (InvalidInputException iie) {
+           throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
+         } catch (NoSuchObjectException nsoe) {
+           // It is ok, ignore
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                               EventMessage.EventType.ALTER_PARTITION,
+                                               new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                               environmentContext);
+       }
+ 
+       success = msdb.commitTransaction();
+     } finally {
+       if (!success) {
+         LOG.error("Failed to rename a partition. Rollback transaction");
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           LOG.error("Revert the data move in renaming a partition.");
+           try {
+             if (destFs.exists(destPath)) {
+               wh.renameDir(destPath, srcPath, false);
+             }
+           } catch (MetaException me) {
+             LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                 +  " in alter partition failure. Manual restore is needed.");
+           } catch (IOException ioe) {
+             LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                 +  " in alter partition failure. Manual restore is needed.");
+           }
+         }
+       }
+     }
+     return oldPart;
+   }
+ 
++  @Deprecated
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
 -        environmentContext, null);
++        environmentContext, -1, null, -1, null);
+   }
+ 
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+                                          final String dbname, final String name,
+                                          final List<Partition> new_parts,
 -                                         EnvironmentContext environmentContext, IHMSHandler handler)
++                                         EnvironmentContext environmentContext,
++                                         long txnId, String writeIdList, long writeId,
++                                         IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     List<Partition> oldParts = new ArrayList<>();
+     List<List<String>> partValsList = new ArrayList<>();
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     boolean success = false;
+     try {
+       msdb.openTransaction();
+ 
 -      Table tbl = msdb.getTable(catName, dbname, name);
++      // Note: should we pass in write ID here? We only update stats on parts so probably not.
++      Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partitions because table or database does not exist.");
+       }
+       for (Partition tmpPart: new_parts) {
+         // Set DDL time to now if not specified
+         if (tmpPart.getParameters() == null ||
+             tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+             Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+           tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+               .currentTimeMillis() / 1000));
+         }
+ 
+         Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
+         oldParts.add(oldTmpPart);
+         partValsList.add(tmpPart.getValues());
+ 
+         if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
+           // Check if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
+             MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 tmpPart, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD and we do not need to update its column stats
+         if (oldTmpPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(),
+               oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
+         }
+       }
+ 
 -      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
++      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, txnId, writeIdList);
+       Iterator<Partition> oldPartsIt = oldParts.iterator();
+       for (Partition newPart : new_parts) {
+         Partition oldPart;
+         if (oldPartsIt.hasNext()) {
+           oldPart = oldPartsIt.next();
+         } else {
+           throw new InvalidOperationException("Missing old partition corresponding to new partition " +
+               "when invoking MetaStoreEventListener for alterPartitions event.");
+         }
+ 
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
+         }
+       }
+ 
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException | NoSuchObjectException e) {
+       throw new InvalidOperationException("Alter partition operation failed: " + e);
+     } finally {
+       if(!success) {
+         msdb.rollbackTransaction();
+       }
+     }
+ 
+     return oldParts;
+   }
+ 
+   private boolean checkPartialPartKeysEqual(List<FieldSchema> oldPartKeys,
+       List<FieldSchema> newPartKeys) {
+     //return true if both are null, or false if one is null and the other isn't
+     if (newPartKeys == null || oldPartKeys == null) {
+       return oldPartKeys == newPartKeys;
+     }
+     if (oldPartKeys.size() != newPartKeys.size()) {
+       return false;
+     }
+     Iterator<FieldSchema> oldPartKeysIter = oldPartKeys.iterator();
+     Iterator<FieldSchema> newPartKeysIter = newPartKeys.iterator();
+     FieldSchema oldFs;
+     FieldSchema newFs;
+     while (oldPartKeysIter.hasNext()) {
+       oldFs = oldPartKeysIter.next();
+       newFs = newPartKeysIter.next();
+       // Alter table can change the type of partition key now.
+       // So check the column name only.
+       if (!oldFs.getName().equals(newFs.getName())) {
+         return false;
+       }
+     }
+ 
+     return true;
+   }
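+ 
+   // Illustrative behaviour of checkPartialPartKeysEqual (hypothetical keys): (ds string, hr int)
+   // vs (ds date, hr int) -> true, since only key names are compared; (ds, hr) vs (ds) -> false
+   // because the sizes differ; null vs null -> true, while null vs non-null -> false.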
+ 
+   /**
+    * Uses the scheme and authority of the object's current location and the path constructed
+    * using the object's new name to construct a path for the object's new location.
+    */
+   private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
+     URI currentUri = currentPath.toUri();
+ 
+     return new Path(currentUri.getScheme(), currentUri.getAuthority(),
+         defaultNewPath.toUri().getPath());
+   }
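+ 
+   // Illustrative example of constructRenamedPath (hypothetical paths): with currentPath
+   // hdfs://nn1:8020/warehouse/old_db.db/old_tbl and defaultNewPath
+   // hdfs://nn2:8020/warehouse/new_db.db/new_tbl, the result keeps the current path's
+   // scheme and authority but takes the new path: hdfs://nn1:8020/warehouse/new_db.db/new_tbl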
+ 
+   @VisibleForTesting
 -  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
++  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable,
++      EnvironmentContext ec, long txnId, String validWriteIds)
+       throws MetaException, InvalidObjectException {
+     String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() :
+         getDefaultCatalog(conf));
+     String dbName = oldTable.getDbName().toLowerCase();
+     String tableName = normalizeIdentifier(oldTable.getTableName());
+     String newDbName = newTable.getDbName().toLowerCase();
+     String newTableName = normalizeIdentifier(newTable.getTableName());
+ 
+     try {
+       List<FieldSchema> oldCols = oldTable.getSd().getCols();
+       List<FieldSchema> newCols = newTable.getSd().getCols();
+       List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+       ColumnStatistics colStats = null;
 -      boolean updateColumnStats = true;
 -
 -      // Nothing to update if everything is the same
 -        if (newDbName.equals(dbName) &&
 -            newTableName.equals(tableName) &&
 -            MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
 -          updateColumnStats = false;
++      boolean updateColumnStats = !newDbName.equals(dbName) || !newTableName.equals(tableName)
++          || !MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols);
++      if (updateColumnStats) {
++        List<String> oldColNames = new ArrayList<>(oldCols.size());
++        for (FieldSchema oldCol : oldCols) {
++          oldColNames.add(oldCol.getName());
+         }
+ 
 -        if (updateColumnStats) {
 -          List<String> oldColNames = new ArrayList<>(oldCols.size());
 -          for (FieldSchema oldCol : oldCols) {
 -            oldColNames.add(oldCol.getName());
 -          }
 -
 -          // Collect column stats which need to be rewritten and remove old stats
 -          colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
 -          if (colStats == null) {
 -            updateColumnStats = false;
 -          } else {
 -            List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
 -            if (statsObjs != null) {
 -              List<String> deletedCols = new ArrayList<>();
 -              for (ColumnStatisticsObj statsObj : statsObjs) {
 -                boolean found = false;
 -                for (FieldSchema newCol : newCols) {
 -                  if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
 -                      && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
 -                    found = true;
 -                    break;
 -                  }
++        // NOTE: this doesn't check stats being compliant, but the alterTable call below does.
++        //       The worst we can do is delete the stats.
++        // Collect column stats which need to be rewritten and remove old stats.
++        colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
++        if (colStats == null) {
++          updateColumnStats = false;
++        } else {
++          List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
++          if (statsObjs != null) {
++            List<String> deletedCols = new ArrayList<>();
++            for (ColumnStatisticsObj statsObj : statsObjs) {
++              boolean found = false;
++              for (FieldSchema newCol : newCols) {
++                if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
++                    && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
++                  found = true;
++                  break;
+                 }
++              }
+ 
 -                if (found) {
 -                  if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
 -                    msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
 -                    newStatsObjs.add(statsObj);
 -                    deletedCols.add(statsObj.getColName());
 -                  }
 -                } else {
++              if (found) {
++                if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
+                   msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
++                  newStatsObjs.add(statsObj);
+                   deletedCols.add(statsObj.getColName());
+                 }
++              } else {
++                msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
++                deletedCols.add(statsObj.getColName());
+               }
 -              StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+             }
++            StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+           }
+         }
++      }
+ 
 -        // Change to new table and append stats for the new table
 -        msdb.alterTable(catName, dbName, tableName, newTable);
 -        if (updateColumnStats && !newStatsObjs.isEmpty()) {
 -          ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
 -          statsDesc.setDbName(newDbName);
 -          statsDesc.setTableName(newTableName);
 -          colStats.setStatsObj(newStatsObjs);
 -          msdb.updateTableColumnStatistics(colStats);
 -        }
++      // Change to new table and append stats for the new table
++      msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
++      if (updateColumnStats && !newStatsObjs.isEmpty()) {
++        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
++        statsDesc.setDbName(newDbName);
++        statsDesc.setTableName(newTableName);
++        colStats.setStatsObj(newStatsObjs);
++        msdb.updateTableColumnStatistics(colStats, txnId, validWriteIds, newTable.getWriteId());
++      }
+     } catch (NoSuchObjectException nsoe) {
+       LOG.debug("Could not find db entry." + nsoe);
+     } catch (InvalidInputException e) {
+       //should not happen since the input were verified before passed in
+       throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
+     }
+   }
+ 
+   private ColumnStatistics updateOrGetPartitionColumnStats(
+       RawStore msdb, String catName, String dbname, String tblname, List<String> partVals,
+       List<FieldSchema> oldCols, Table table, Partition part, List<FieldSchema> newCols)
+           throws MetaException, InvalidObjectException {
+     ColumnStatistics newPartsColStats = null;
+     try {
+       // if newCols are not specified, use default ones.
+       if (newCols == null) {
+         newCols = part.getSd() == null ? new ArrayList<>() : part.getSd().getCols();
+       }
+       String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+       boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
+           || !oldPartName.equals(newPartName);
+ 
+       // do not need to update column stats if alter partition is not for rename or changing existing columns
+       if (!rename && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+         return newPartsColStats;
+       }
+       List<String> oldColNames = new ArrayList<>(oldCols.size());
+       for (FieldSchema oldCol : oldCols) {
+         oldColNames.add(oldCol.getName());
+       }
+       List<String> oldPartNames = Lists.newArrayList(oldPartName);
++      // TODO: doesn't take txn stats into account. This method can only remove stats.
+       List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
+           oldPartNames, oldColNames);
+       assert (partsColStats.size() <= 1);
+       for (ColumnStatistics partColStats : partsColStats) { //actually only at most one loop
+         List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+         List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
+         List<String> deletedCols = new ArrayList<>();
+         for (ColumnStatisticsObj statsObj : statsObjs) {
+           boolean found = false;
+           for (FieldSchema newCol : newCols) {
+             if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                 && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+               found = true;
+               break;
+             }
+           }
+           if (found) {
+             if (rename) {
+               msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                   partVals, statsObj.getColName());
+               newStatsObjs.add(statsObj);
+             }
+           } else {
+             msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                 partVals, statsObj.getColName());
+             deletedCols.add(statsObj.getColName());
+           }
+         }
+         StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
+         if (!newStatsObjs.isEmpty()) {
+           partColStats.setStatsObj(newStatsObjs);
+           newPartsColStats = partColStats;
+         }
+       }
+     } catch (NoSuchObjectException nsoe) {
+       // ignore this exception, actually this exception won't be thrown from getPartitionColumnStatistics
+     } catch (InvalidInputException iie) {
+       throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
+     }
+ 
+     return newPartsColStats;
+   }
+ 
+   private void checkColTypeChangeCompatible(List<FieldSchema> oldCols, List<FieldSchema> newCols)
+       throws InvalidOperationException {
+     List<String> incompatibleCols = new ArrayList<>();
+     int maxCols = Math.min(oldCols.size(), newCols.size());
+     for (int i = 0; i < maxCols; i++) {
+       if (!ColumnType.areColTypesCompatible(
+           ColumnType.getTypeName(oldCols.get(i).getType()),
+           ColumnType.getTypeName(newCols.get(i).getType()))) {
+         incompatibleCols.add(newCols.get(i).getName());
+       }
+     }
+     if (!incompatibleCols.isEmpty()) {
+       throw new InvalidOperationException(
+           "The following columns have types incompatible with the existing " +
+               "columns in their respective positions :\n" +
+               org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+       );
+     }
+   }
+ 
+ }
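
For context, and not part of the diff above: a minimal sketch of how a transactional caller
might invoke the new alterPartitions overload, assuming it already holds an open txn id, a
serialized write-id list and an allocated write id, and that msdb, wh, newParts,
environmentContext, hmsHandler and the alter handler instance are in scope. All names and
values below are hypothetical.

    // Hypothetical transactional context; a real caller obtains these from its open transaction.
    long txnId = 1234L;
    long writeId = 7L;
    String validWriteIds = "db1.tbl1:7:9223372036854775807::";  // illustrative serialized write-id list
    alterHandler.alterPartitions(msdb, wh, "hive", "db1", "tbl1", newParts,
        environmentContext, txnId, validWriteIds, writeId, hmsHandler);

The deprecated overload above simply passes -1 for the txn id and write id and null for the
write-id list and handler, i.e. no transactional context.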


[15/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 0000000,33f24fb..080cc52
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@@ -1,0 -1,504 +1,509 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import com.google.common.annotations.VisibleForTesting;
++
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ 
+ import java.sql.SQLException;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ public interface TxnStore extends Configurable {
+ 
+   enum MUTEX_KEY {
+     Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock,
+     WriteSetCleaner, CompactionScheduler, WriteIdAllocator, MaterializationRebuild
+   }
+   // Compactor states (Should really be enum)
+   String INITIATED_RESPONSE = "initiated";
+   String WORKING_RESPONSE = "working";
+   String CLEANING_RESPONSE = "ready for cleaning";
+   String FAILED_RESPONSE = "failed";
+   String SUCCEEDED_RESPONSE = "succeeded";
+   String ATTEMPTED_RESPONSE = "attempted";
+ 
+   int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
+ 
+   /**
+    * Get information about open transactions.  This gives extensive information about the
+    * transactions rather than just the list of transactions.  This should be used when the need
+    * is to see information about the transactions (e.g. show transactions).
+    * @return information about open transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException;
+ 
+   /**
+    * Get list of valid transactions.  This gives just the list of transactions that are open.
+    * @return list of open transactions, as well as a high water mark.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsResponse getOpenTxns() throws MetaException;
+ 
+   /**
+    * Get the count for open transactions.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   void countOpenTxns() throws MetaException;
+ 
+   /**
+    * Open a set of transactions
+    * @param rqst request to open transactions
+    * @return information on opened transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException;
+ 
+   /**
+    * Abort (rollback) a transaction.
+    * @param rqst info on transaction to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException;
+ 
+   /**
+    * Abort (rollback) a list of transactions in one request.
+    * @param rqst info on transactions to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException;
+ 
+   /**
+    * Commit a transaction
+    * @param rqst info on transaction to commit
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException,  MetaException;
+ 
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException in case of failure
+    */
+   @RetrySemantics.Idempotent
+   void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException;
+ 
+   /**
+    * Get invalidation info for the materialization. Currently, the materialization information
+    * only contains information about whether there was update/delete operations on the source
+    * tables used by the materialization since it was created.
+    * @param cm creation metadata for the materialization
+    * @param validTxnList valid transaction list for snapshot taken for current query
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   Materialization getMaterializationInvalidationInfo(
+       final CreationMetadata cm, final String validTxnList)
+           throws MetaException;
+ 
++  @RetrySemantics.ReadOnly
++  long getTxnIdForWriteId(String dbName, String tblName, long writeId)
++      throws MetaException;
++
+   LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException;
+ 
+   long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout)
+       throws MetaException;
+ 
+   /**
+    * Gets the list of valid write ids for the given table with respect to the current txn.
+    * @param rqst info on transaction and list of table names associated with given transaction
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException,  MetaException;
+ 
+   /**
+    * Allocate a write ID for the given table and associate it with a transaction
+    * @param rqst info on transaction and table to allocate write id
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Called on conversion of existing table to full acid.  Sets initial write ID to a high
+    * enough value so that we can assign unique ROW__IDs to data in existing files.
+    */
+   void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst) throws MetaException;
+ 
+   /**
+    * Obtain a lock.
+    * @param rqst information on the lock to obtain.  If the requester is part of a transaction
+    *             the txn information must be included in the lock request.
+    * @return info on the lock, including whether it was obtained.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   LockResponse lock(LockRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Check whether a lock has been obtained.  This is used after {@link #lock} returned a wait
+    * state.
+    * @param rqst info on the lock to check
+    * @return info on the state of the lock
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   LockResponse checkLock(CheckLockRequest rqst)
+     throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Unlock a lock.  It is not legal to call this if the caller is part of a txn.  In that case
+    * the txn should be committed or aborted instead.  (Note someday this will change since
+    * multi-statement transactions will allow unlocking in the transaction.)
+    * @param rqst lock to unlock
+    * @throws NoSuchLockException
+    * @throws TxnOpenException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void unlock(UnlockRequest rqst)
+     throws NoSuchLockException, TxnOpenException, MetaException;
+ 
+   /**
+    * Get information on current locks.
+    * @param rqst lock information to retrieve
+    * @return lock information.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException;
+ 
+   /**
+    * Send a heartbeat for a lock or a transaction
+    * @param ids lock and/or txn id to heartbeat
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void heartbeat(HeartbeatRequest ids)
+     throws NoSuchTxnException,  NoSuchLockException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Heartbeat a group of transactions together
+    * @param rqst set of transactions to heartbeat
+    * @return info on txns that were heartbeated
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst)
+     throws MetaException;
+ 
+   /**
+    * Submit a compaction request into the queue.  This is called when a user manually requests a
+    * compaction.
+    * @param rqst information on what to compact
+    * @return id of the compaction that has been started or existing id if this resource is already scheduled
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   CompactionResponse compact(CompactionRequest rqst) throws MetaException;
+ 
+   /**
+    * Show list of current compactions.
+    * @param rqst info on which compactions to show
+    * @return compaction information
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException;
+ 
+   /**
+    * Add information on a set of dynamic partitions that participated in a transaction.
+    * @param rqst dynamic partition info.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void addDynamicPartitions(AddDynamicPartitions rqst)
+       throws NoSuchTxnException,  TxnAbortedException, MetaException;
+ 
+   /**
+    * Clean up corresponding records in metastore tables.
+    * @param type Hive object type
+    * @param db database object
+    * @param table table object
+    * @param partitionIterator partition iterator
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void cleanupRecords(HiveObjectType type, Database db, Table table,
+                              Iterator<Partition> partitionIterator) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName,
+       String newCatName, String newDbName, String newTabName, String newPartName)
+       throws MetaException;
+ 
+   /**
+    * Timeout transactions and/or locks.  This should only be called by the compactor.
+    */
+   @RetrySemantics.Idempotent
+   void performTimeOuts();
+ 
+   /**
+    * This will look through the completed_txn_components table and look for partitions or tables
+    * that may be ready for compaction.  Also, look through txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted transactions to allow before marking this as a
+    *                   potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @RetrySemantics.ReadOnly
+   Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException;
+ 
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @RetrySemantics.Idempotent
+   void setRunAs(long cq_id, String user) throws MetaException;
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @RetrySemantics.ReadOnly
+   CompactionInfo findNextToCompact(String workerId) throws MetaException;
+ 
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @RetrySemantics.SafeToRetry
+   void markCompacted(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @RetrySemantics.ReadOnly
+   List<CompactionInfo> findReadyToClean() throws MetaException;
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @RetrySemantics.CannotRetry
+   void markCleaned(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Mark a compaction entry as failed.  This will move it to the compaction history queue with a
+    * failed status.  It will NOT clean up aborted transactions in the table/partition associated
+    * with this compaction.
+    * @param info information on the compaction that failed.
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   void markFailed(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Clean up entries from TXN_TO_WRITE_ID table less than min_uncommitted_txnid as found by
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)).
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanTxnToWriteIdTable() throws MetaException;
+ 
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components.  The reason such
+    * txns exist can be that no work was done in this txn (e.g. Streaming opened TransactionBatch and
+    * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanEmptyAbortedTxns() throws MetaException;
+ 
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shut down.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE 'hostname%' pattern will match the worker id.
+    */
+   @RetrySemantics.Idempotent
+   void revokeFromLocalWorkers(String hostname) throws MetaException;
+ 
+   /**
+    * This call will move all compaction queue
+    * entries that are assigned to a worker but have exceeded the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @RetrySemantics.Idempotent
+   void revokeTimedoutWorkers(long timeout) throws MetaException;
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException;
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    */
+   @RetrySemantics.Idempotent
+   void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException;
+ 
+   /**
+    * For any given compactable entity (partition, table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has the advantage of always retaining the last failure/success even if
+    * it's not recent.
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void purgeCompactionHistory() throws MetaException;
+ 
+   /**
+    * WriteSet tracking is used to ensure proper transaction isolation.  This method deletes the 
+    * transaction metadata once it becomes unnecessary.  
+    */
+   @RetrySemantics.SafeToRetry
+   void performWriteSetGC();
+ 
+   /**
+    * Determine if there are enough consecutive failures compacting a table or partition that no
+    * new automatic compactions should be scheduled.  User initiated compactions do not do this
+    * check.
+    * @param ci  Table or partition to check.
+    * @return true if it is ok to compact, false if there have been too many failures.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   boolean checkFailedCompactions(CompactionInfo ci) throws MetaException;
+ 
+   @VisibleForTesting
+   int numLocksInLockTable() throws SQLException, MetaException;
+ 
+   @VisibleForTesting
+   long setTimeout(long milliseconds);
+ 
+   @RetrySemantics.Idempotent
+   MutexAPI getMutexAPI();
+ 
+   /**
+    * This is primarily designed to provide coarse grained mutex support to operations running
+    * inside the Metastore (of which there could be several instances).  The initial goal is to 
+    * ensure that various sub-processes of the Compactor don't step on each other.
+    * 
+    * In the RDBMS world each {@code LockHandle} uses a java.sql.Connection, so use it sparingly.
+    */
+   interface MutexAPI {
+     /**
+      * The {@code key} is the name of the lock. Will acquire an exclusive lock or block.  It returns
+      * a handle which must be used to release the lock.  Each invocation returns a new handle.
+      */
+     LockHandle acquireLock(String key) throws MetaException;
+ 
+     /**
+      * Same as {@link #acquireLock(String)} but takes an already existing handle as input.  This 
+      * will associate the lock on {@code key} with the same handle.  All locks associated with
+      * the same handle will be released together.
+      * @param handle not NULL
+      */
+     void acquireLock(String key, LockHandle handle) throws MetaException;
+     interface LockHandle {
+       /**
+        * Releases all locks associated with this handle.
+        */
+       void releaseLocks();
+     }
+   }
+ 
+   /**
+    * Once a {@link java.util.concurrent.ThreadPoolExecutor} Worker submits a job to the cluster,
+    * it calls this to update the metadata.
+    * @param id {@link CompactionInfo#id}
+    */
+   @RetrySemantics.Idempotent
+   void setHadoopJobId(String hadoopJobId, long id);
+ 
+   /**
+    * Add the ACID write event information to writeNotificationLog table.
+    * @param acidWriteEvent
+    */
+   @RetrySemantics.Idempotent
+   void addWriteNotificationLog(AcidWriteEvent acidWriteEvent) throws MetaException;
+ }
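
As a usage note, not part of the diff: the MutexAPI contract above is handle-based, so callers
release locks through the handle returned by acquireLock. A minimal sketch under the assumption
that a TxnStore instance is already available (the helper class and method names are made up):

    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.txn.TxnStore;

    public class MutexUsageSketch {
      // Runs the given work while holding the coarse-grained Initiator mutex.
      static void runUnderInitiatorMutex(TxnStore txnStore, Runnable work) throws MetaException {
        TxnStore.MutexAPI.LockHandle handle =
            txnStore.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Initiator.name());
        try {
          work.run();
        } finally {
          handle.releaseLocks();  // releases all locks associated with this handle
        }
      }
    }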

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index 0000000,fa291d5..aac5811
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@@ -1,0 -1,471 +1,481 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p/>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p/>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidCompactorWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 -import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
 -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.util.Collections;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class TxnUtils {
+   private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
+ 
++  // Transactional stats states
++  static final public char STAT_OPEN = 'o';
++  static final public char STAT_INVALID = 'i';
++  static final public char STAT_COMMITTED = 'c';
++  static final public char STAT_OBSOLETE = 's';
++
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param txns txn list from the metastore
+    * @param currentTxn Current transaction that the user has open.  If this is greater than 0 it
+    *                   will be removed from the exceptions list so that the user sees his own
+    *                   transaction as valid.
+    * @return a valid txn list.
+    */
+   public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long currentTxn) {
+     /*
+      * The highWaterMark should be min(currentTxn, txns.getTxn_high_water_mark()) assuming currentTxn > 0;
+      * otherwise, if currentTxn=7 and txn 8 commits before 7, then 7 would see the result of 8, which
+      * doesn't make sense for Snapshot Isolation. Of course for Read Committed, the list should
+      * include the latest committed set.
+      */
+     long highWaterMark = (currentTxn > 0) ? Math.min(currentTxn, txns.getTxn_high_water_mark())
+                                           : txns.getTxn_high_water_mark();
+ 
+     // Open txns are already sorted in ascending order. This list may or may not include the HWM,
+     // but it is guaranteed not to contain a txn > HWM. However, if we overwrite the HWM with currentTxn,
+     // then we need to truncate the exceptions list accordingly.
+     List<Long> openTxns = txns.getOpen_txns();
+ 
+     // We care only about open/aborted txns below currentTxn, and the size of the exceptions list
+     // should be determined accordingly. currentTxn will be missing from the openTxns list only in
+     // rare cases, e.g. when the txn was aborted by AcidHouseKeeperService and the compactor has
+     // already cleaned up the aborted txns. In such cases binarySearch returns a negative value
+     // (based on the insertion position of currentTxn), so we just negate it to get the size.
+     int sizeToHwm = (currentTxn > 0) ? Collections.binarySearch(openTxns, currentTxn) : openTxns.size();
+     sizeToHwm = (sizeToHwm < 0) ? (-sizeToHwm) : sizeToHwm;
+     long[] exceptions = new long[sizeToHwm];
+     BitSet inAbortedBits = BitSet.valueOf(txns.getAbortedBits());
+     BitSet outAbortedBits = new BitSet();
+     long minOpenTxnId = Long.MAX_VALUE;
+     int i = 0;
+     for (long txn : openTxns) {
+       // For snapshot isolation, we don't care about txns greater than current txn and so stop here.
+       // Also, we need not include current txn to exceptions list.
+       if ((currentTxn > 0) && (txn >= currentTxn)) {
+         break;
+       }
+       if (inAbortedBits.get(i)) {
+         outAbortedBits.set(i);
+       } else if (minOpenTxnId == Long.MAX_VALUE) {
+         minOpenTxnId = txn;
+       }
+       exceptions[i++] = txn;
+     }
+     return new ValidReadTxnList(exceptions, outAbortedBits, highWaterMark, minOpenTxnId);
+   }
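As a rough illustration of the watermark capping above (a hand-worked sketch, not part of this patch; it assumes the usual Thrift-generated setters on GetOpenTxnsResponse and purely hypothetical txn ids):

    // Hypothetical snapshot: HWM = 10, open txns = [5, 7, 9], txn 5 aborted, caller's txn = 7.
    GetOpenTxnsResponse txns = new GetOpenTxnsResponse();
    txns.setTxn_high_water_mark(10L);
    txns.setOpen_txns(Arrays.asList(5L, 7L, 9L));
    BitSet aborted = new BitSet();
    aborted.set(0);                                       // bit 0 corresponds to txn 5
    txns.setAbortedBits(ByteBuffer.wrap(aborted.toByteArray()));

    ValidTxnList snapshot = TxnUtils.createValidReadTxnList(txns, 7);
    // The HWM is capped at min(7, 10) = 7, so txns 8..10 stay invisible even if they commit first;
    // the exceptions list holds only the aborted txn 5, and the caller's own txn 7 is excluded.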
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param currentTxnId current txn ID for which we get the valid write ids list
+    * @param validIds valid write ids list from the metastore
+    * @return a valid write IDs list for the whole transaction.
+    */
+   public static ValidTxnWriteIdList createValidTxnWriteIdList(Long currentTxnId,
+                                                               List<TableValidWriteIds> validIds) {
+     ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(currentTxnId);
+     for (TableValidWriteIds tableWriteIds : validIds) {
+       validTxnWriteIdList.addTableValidWriteIdList(createValidReaderWriteIdList(tableWriteIds));
+     }
+     return validTxnWriteIdList;
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidReaderWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted write ids as invalid.
+    * @param tableWriteIds valid write ids for the given table from the metastore
+    * @return a valid write IDs list for the input table
+    */
+   public static ValidReaderWriteIdList createValidReaderWriteIdList(TableValidWriteIds tableWriteIds) {
+     String fullTableName = tableWriteIds.getFullTableName();
+     long highWater = tableWriteIds.getWriteIdHighWaterMark();
+     List<Long> invalids = tableWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       exceptions[i++] = writeId;
+     }
+     if (tableWriteIds.isSetMinOpenWriteId()) {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater,
+                                         tableWriteIds.getMinOpenWriteId());
+     } else {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater);
+     }
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidCompactorWriteIdList}.  This assumes that the caller intends to
+    * compact the files, and thus treats only open transactions/write ids as invalid.  Additionally, any
+    * writeId above the minimum open writeId is also treated as invalid.  This is to avoid creating something like
+    * delta_17_120 where writeId 80, for example, is still open.
+    * @param tableValidWriteIds table write id list from the metastore
+    * @return a valid write id list.
+    */
+   public static ValidCompactorWriteIdList createValidCompactWriteIdList(TableValidWriteIds tableValidWriteIds) {
+     String fullTableName = tableValidWriteIds.getFullTableName();
+     long highWater = tableValidWriteIds.getWriteIdHighWaterMark();
+     long minOpenWriteId = Long.MAX_VALUE;
+     List<Long> invalids = tableValidWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableValidWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       if (abortedBits.get(i)) {
+         // Only need aborted since we don't consider anything above minOpenWriteId
+         exceptions[i++] = writeId;
+       } else {
+         minOpenWriteId = Math.min(minOpenWriteId, writeId);
+       }
+     }
+     if(i < exceptions.length) {
+       exceptions = Arrays.copyOf(exceptions, i);
+     }
+     highWater = minOpenWriteId == Long.MAX_VALUE ? highWater : minOpenWriteId - 1;
+     BitSet bitSet = new BitSet(exceptions.length);
+     bitSet.set(0, exceptions.length); // for ValidCompactorWriteIdList, everything in exceptions are aborted
+     if (minOpenWriteId == Long.MAX_VALUE) {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater);
+     } else {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater, minOpenWriteId);
+     }
+   }
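A comparable sketch for the compactor-side view (illustrative only; the table name is a placeholder and the usual Thrift-generated setters on TableValidWriteIds are assumed):

    // Hypothetical table state: write id HWM = 120, write id 80 aborted, write id 95 still open.
    TableValidWriteIds ids = new TableValidWriteIds();
    ids.setFullTableName("default.acid_tbl");
    ids.setWriteIdHighWaterMark(120L);
    ids.setInvalidWriteIds(Arrays.asList(80L, 95L));
    BitSet aborted = new BitSet();
    aborted.set(0);                                       // bit 0: write id 80 is aborted
    ids.setAbortedBits(ByteBuffer.wrap(aborted.toByteArray()));

    ValidCompactorWriteIdList compactIds = TxnUtils.createValidCompactWriteIdList(ids);
    // The high watermark is lowered to 94, one below the lowest open write id (95), so the compactor
    // never produces a delta spanning a still-open write id; the aborted write id 80 remains an exception.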
+ 
+   public static ValidReaderWriteIdList updateForCompactionQuery(ValidReaderWriteIdList ids) {
+     // This is based on the existing valid write ID list that was built for a select query;
+     // therefore we assume all the aborted txns, etc. were already accounted for.
+     // All we do is adjust the high watermark to only include contiguous txns.
+     Long minOpenWriteId = ids.getMinOpenWriteId();
+     if (minOpenWriteId != null && minOpenWriteId != Long.MAX_VALUE) {
+       return ids.updateHighWatermark(ids.getMinOpenWriteId() - 1);
+     }
+     return ids;
+   }
+ 
+   /**
+    * Get an instance of the TxnStore that is appropriate for this store
+    * @param conf configuration
+    * @return txn store
+    */
+   public static TxnStore getTxnStore(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.TXN_STORE_IMPL);
+     try {
+       TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance();
+       handler.setConf(conf);
+       return handler;
+     } catch (Exception e) {
+       LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   /**
+    * Note, users are responsible for using the correct TxnManager. We do not look at
+    * SessionState.get().getTxnMgr().supportsAcid() here.
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    * @return true if table is a transactional table, false otherwise
+    */
+   public static boolean isTransactionalTable(Table table) {
+     if (table == null) {
+       return false;
+     }
+     Map<String, String> parameters = table.getParameters();
+     String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
++    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
++  }
++
++  public static boolean isTransactionalTable(Map<String, String> parameters) {
++    if (parameters == null) {
++      return false;
++    }
++    String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+     return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+   }
+ 
+   /**
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    */
+   public static boolean isAcidTable(Table table) {
+     return TxnUtils.isTransactionalTable(table) &&
+       TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters()
+       .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES));
+   }
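A small illustration of how these parameter checks interact (a sketch using a hypothetical in-memory Table object, not code from this patch):

    Table t = new Table();
    t.setParameters(new HashMap<>());
    t.getParameters().put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
    boolean transactional = TxnUtils.isTransactionalTable(t);   // true
    boolean fullAcid = TxnUtils.isAcidTable(t);                 // false: no transactional_properties yet

    t.getParameters().put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
        TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY);
    fullAcid = TxnUtils.isAcidTable(t);                         // true: transactional with "default" properties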
+ 
+   /**
+    * Should produce the result as {@code <dbName>.<tableName>}.
+    */
+   public static String getFullTableName(String dbName, String tableName) {
+     return dbName.toLowerCase() + "." + tableName.toLowerCase();
+   }
+ 
+   public static String[] getDbTableName(String fullTableName) {
+     return fullTableName.split("\\.");
+   }
+ 
+ 
+ 
+   /**
+    * Build a query with one or multiple 'IN' or 'NOT IN' clauses from the given input parameters.
+    * The query may be split into several queries, but only for the 'IN' case: for 'NOT IN' clauses,
+    * multiple queries would change the semantics of the intended statement.
+    * E.g., assume the input "inList" parameter is [5, 6] and the
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value per 'NOT IN' clause.
+    * Issuing two delete statements would then change the semantics of the intended SQL statement:
+    * the sequence 'delete from T where a not in (5)' and 'delete from T where a not in (6)'
+    * is not equivalent to 'delete from T where a not in (5, 6)'.
+    *
+    * Note that this method currently supports only a single column for the
+    * IN/NOT IN clauses, and that it only covers OR-based composite 'IN' clauses and
+    * AND-based composite 'NOT IN' clauses.
+    * For example, in the 'IN' case the method builds a query joined with OR,
+    * e.g., "id in (1,2,3) OR id in (4,5,6)".
+    * In the 'NOT IN' case, the NOT IN list is broken into multiple 'NOT IN' clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" means "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before the IN list
+    * @param suffix    IN:  Part of the query that comes after the IN list
+    * @param inList    IN:  the list with the IN list values
+    * @param inColumn  IN:  single column name used with the IN list operator
+    * @param addParens IN:  add a pair of parentheses outside the IN lists,
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the counts of IN list values that end up in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClause(Configuration conf,
+                                             List<String> queries,
+                                             StringBuilder prefix,
+                                             StringBuilder suffix,
+                                             List<Long> inList,
+                                             String inColumn,
+                                             boolean addParens,
+                                             boolean notIn) {
+     List<String> inListStrings = new ArrayList<>(inList.size());
+     for (Long aLong : inList) {
+       inListStrings.add(aLong.toString());
+     }
+     return buildQueryWithINClauseStrings(conf, queries, prefix, suffix,
+         inListStrings, inColumn, addParens, notIn);
+ 
+   }
+   /**
+    * Build a query with one or multiple 'IN' or 'NOT IN' clauses from the given input parameters.
+    * The query may be split into several queries, but only for the 'IN' case: for 'NOT IN' clauses,
+    * multiple queries would change the semantics of the intended statement.
+    * E.g., assume the input "inList" parameter is [5, 6] and the
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value per 'NOT IN' clause.
+    * Issuing two delete statements would then change the semantics of the intended SQL statement:
+    * the sequence 'delete from T where a not in (5)' and 'delete from T where a not in (6)'
+    * is not equivalent to 'delete from T where a not in (5, 6)'.
+    *
+    * Note that this method currently supports only a single column for the
+    * IN/NOT IN clauses, and that it only covers OR-based composite 'IN' clauses and
+    * AND-based composite 'NOT IN' clauses.
+    * For example, in the 'IN' case the method builds a query joined with OR,
+    * e.g., "id in (1,2,3) OR id in (4,5,6)".
+    * In the 'NOT IN' case, the NOT IN list is broken into multiple 'NOT IN' clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" means "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before the IN list
+    * @param suffix    IN:  Part of the query that comes after the IN list
+    * @param inList    IN:  the list with the IN list values
+    * @param inColumn  IN:  single column name used with the IN list operator
+    * @param addParens IN:  add a pair of parentheses outside the IN lists,
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the counts of IN list values that end up in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
+       StringBuilder suffix, List<String> inList, String inColumn, boolean addParens, boolean notIn) {
+     // Get configuration parameters
+     int maxQueryLength = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH);
+     int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
+ 
+     // Validate the input parameters, since this is a public method.
+     if (inList == null || inList.size() == 0 || maxQueryLength <= 0 || batchSize <= 0) {
+       throw new IllegalArgumentException("The IN list is empty!");
+     }
+ 
+     // Define constants and local variables.
+     int inListSize = inList.size();
+     StringBuilder buf = new StringBuilder();
+ 
+     int cursor4InListArray = 0,  // cursor over the "inList" array.
+         cursor4InClauseElements = 0,  // cursor over the elements of the current 'IN'/'NOT IN' clause.
+         cursor4queryOfInClauses = 0;  // cursor over the 'IN'/'NOT IN' clauses of the current query.
+     boolean nextItemNeeded = true;
+     boolean newInclausePrefixJustAppended = false;
+     StringBuilder nextValue = new StringBuilder("");
+     StringBuilder newInclausePrefix =
+       new StringBuilder(notIn ? " and " + inColumn + " not in (" :
+                                 " or " + inColumn + " in (");
+     List<Integer> ret = new ArrayList<>();
+     int currentCount = 0;
+ 
+     // Loop over the given inList elements.
+     while( cursor4InListArray < inListSize || !nextItemNeeded) {
+       if (cursor4queryOfInClauses == 0) {
+         // Append prefix
+         buf.append(prefix);
+         if (addParens) {
+           buf.append("(");
+         }
+         buf.append(inColumn);
+ 
+         if (notIn) {
+           buf.append(" not in (");
+         } else {
+           buf.append(" in (");
+         }
+         cursor4queryOfInClauses++;
+         newInclausePrefixJustAppended = false;
+       }
+ 
+       // Get the next "inList" value element if needed.
+       if (nextItemNeeded) {
+         nextValue.setLength(0);
+         nextValue.append(String.valueOf(inList.get(cursor4InListArray++)));
+         nextItemNeeded = false;
+       }
+ 
+       // Compute the size of a query when the 'nextValue' is added to the current query.
+       int querySize = querySizeExpected(buf.length(), nextValue.length(), suffix.length(), addParens);
+ 
+       if (querySize > maxQueryLength * 1024) {
+         // Check an edge case where the DIRECT_SQL_MAX_QUERY_LENGTH setting does not allow even one 'IN' clause with a single value.
+         if (cursor4queryOfInClauses == 1 && cursor4InClauseElements == 0) {
+           throw new IllegalArgumentException("The current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + " is set too small to have one IN clause with single value!");
+         }
+ 
+         // Check an edge case: throw an exception if we cannot build a single query for the 'NOT IN' case, as explained in the method comments.
+         if (notIn) {
+           throw new IllegalArgumentException("The NOT IN list has too many elements for the current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + "!");
+         }
+ 
+         // Wrap up the current query string since we cannot add another "inList" element value.
+         if (newInclausePrefixJustAppended) {
+           buf.delete(buf.length()-newInclausePrefix.length(), buf.length());
+         }
+ 
+         buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma to finish an 'IN' clause string.
+ 
+         if (addParens) {
+           buf.append(")");
+         }
+ 
+         buf.append(suffix);
+         queries.add(buf.toString());
+         ret.add(currentCount);
+ 
+         // Prepare a new query string.
+         buf.setLength(0);
+         currentCount = 0;
+         cursor4queryOfInClauses = cursor4InClauseElements = 0;
+         querySize = 0;
+         newInclausePrefixJustAppended = false;
+         continue;
+       } else if (cursor4InClauseElements >= batchSize-1 && cursor4InClauseElements != 0) {
+         // Finish the current 'IN'/'NOT IN' clause and start a new clause.
+         buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma.
+         buf.append(newInclausePrefix.toString());
+ 
+         newInclausePrefixJustAppended = true;
+ 
+         // increment cursor for per-query IN-clause list
+         cursor4queryOfInClauses++;
+         cursor4InClauseElements = 0;
+       } else {
+         buf.append(nextValue.toString()).append(",");
+         currentCount++;
+         nextItemNeeded = true;
+         newInclausePrefixJustAppended = false;
+         // increment cursor for elements per 'IN'/'NOT IN' clause.
+         cursor4InClauseElements++;
+       }
+     }
+ 
+     // Finish the last query.
+     if (newInclausePrefixJustAppended) {
+       buf.delete(buf.length() - newInclausePrefix.length(), buf.length());
+     }
+     buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma.
+     if (addParens) {
+       buf.append(")");
+     }
+     buf.append(suffix);
+     queries.add(buf.toString());
+     ret.add(currentCount);
+     return ret;
+   }
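A typical call on the metastore side looks roughly like the following (an illustrative sketch; `conf` stands for an already-initialized metastore Configuration, and the TXN_COMPONENTS/tc_txnid names are just example targets):

    List<String> queries = new ArrayList<>();
    List<Long> txnIds = Arrays.asList(1L, 2L, 3L);
    List<Integer> counts = TxnUtils.buildQueryWithINClause(conf, queries,
        new StringBuilder("delete from TXN_COMPONENTS where "),   // prefix
        new StringBuilder(""),                                    // suffix
        txnIds, "tc_txnid", true, false);
    // With the default size limits this yields a single statement:
    //   delete from TXN_COMPONENTS where (tc_txnid in (1,2,3))
    // and counts == [3]. Longer lists are split into OR-ed IN clauses and, when a statement would
    // exceed DIRECT_SQL_MAX_QUERY_LENGTH, into multiple statements (never for NOT IN).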
+ 
+   /**
+    * Compute and return the expected size of a query statement for the given input parameters.
+    *
+    * @param sizeSoFar     size of the current contents of the buffer
+    * @param sizeNextItem  size of the next 'IN' clause element value
+    * @param suffixSize    size of the suffix of the query statement
+    * @param addParens     whether an additional closing parenthesis is added
+    */
+   private static int querySizeExpected(int sizeSoFar,
+                                        int sizeNextItem,
+                                        int suffixSize,
+                                        boolean addParens) {
+ 
+     int size = sizeSoFar + sizeNextItem + suffixSize;
+ 
+     if (addParens) {
+        size++;
+     }
+ 
+     return size;
+   }
+ }


[31/50] [abbrv] hive git commit: HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 70edb96..91c86a7 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -420,7 +420,7 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   @Override
   public void alter_table(String catName, String dbName, String tbl_name, Table new_tbl,
-      EnvironmentContext envContext, long txnId, String validWriteIds)
+      EnvironmentContext envContext, String validWriteIds)
           throws InvalidOperationException, MetaException, TException {
     HiveMetaHook hook = getHook(new_tbl);
     if (hook != null) {
@@ -428,7 +428,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     }
     AlterTableRequest req = new AlterTableRequest(dbName, tbl_name, new_tbl);
     req.setCatName(catName);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(validWriteIds);
     req.setEnvironmentContext(envContext);
     client.alter_table_req(req);
@@ -438,15 +437,14 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   @Override
   public void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
                               final Partition newPart) throws TException {
-    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart, -1, null);
+    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart, null);
   }
 
   @Override
   public void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
-                              Partition newPart, long txnId, String validWriteIds) throws TException {
+                              Partition newPart, String validWriteIds) throws TException {
     RenamePartitionRequest req = new RenamePartitionRequest(dbname, tableName, part_vals, newPart);
     req.setCatName(catName);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(validWriteIds);
     client.rename_partition_req(req);
   }
@@ -816,35 +814,34 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String dbName, String tableName, List<String> partNames, List<String> colNames,
-      long txnId, String validWriteIdList)
+      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
-        partNames, colNames, txnId, validWriteIdList);
+        partNames, colNames, validWriteIdList);
   }
 
   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String catName, String dbName, String tableName, List<String> partNames,
-      List<String> colNames, long txnId, String validWriteIdList)
+      List<String> colNames, String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
         partNames);
     rqst.setCatName(catName);
-    rqst.setTxnId(txnId);
     rqst.setValidWriteIdList(validWriteIdList);
     return client.get_partitions_statistics_req(rqst).getPartStats();
   }
 
   @Override
   public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
-      List<String> partNames, long txnId, String writeIdList)
+      List<String> partNames, String writeIdList)
       throws NoSuchObjectException, MetaException, TException {
     return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
-        partNames, txnId, writeIdList);  }
+        partNames, writeIdList);  }
 
   @Override
   public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
-      List<String> partNames, long txnId, String writeIdList)
+      List<String> partNames, String writeIdList)
       throws NoSuchObjectException, MetaException, TException {
     if (colNames.isEmpty() || partNames.isEmpty()) {
       LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
@@ -852,7 +849,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
     }
     PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
     req.setCatName(catName);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     return client.get_aggr_stats_for(req);
   }
@@ -1462,29 +1458,28 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   @Override
   public void truncateTable(String dbName, String tableName, List<String> partNames,
-      long txnId, String validWriteIds, long writeId) throws TException {
+      String validWriteIds, long writeId) throws TException {
     truncateTableInternal(getDefaultCatalog(conf),
-        dbName, tableName, partNames, txnId, validWriteIds, writeId);
+        dbName, tableName, partNames, validWriteIds, writeId);
   }
 
   @Override
   public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
-    truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, -1, null, -1);
+    truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, null, -1);
   }
 
   @Override
   public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
       throws TException {
-    truncateTableInternal(catName, dbName, tableName, partNames, -1, null, -1);
+    truncateTableInternal(catName, dbName, tableName, partNames, null, -1);
   }
 
   private void truncateTableInternal(String catName, String dbName, String tableName,
-      List<String> partNames, long txnId, String validWriteIds, long writeId)
+      List<String> partNames, String validWriteIds, long writeId)
           throws MetaException, TException {
     TruncateTableRequest req = new TruncateTableRequest(
         prependCatalogToDbName(catName, dbName, conf), tableName);
     req.setPartNames(partNames);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(validWriteIds);
     req.setWriteId(writeId);
     client.truncate_table_req(req);
@@ -1769,14 +1764,6 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   }
 
   @Override
-  public Table getTable(String dbname, String name,
-                 long txnId, String validWriteIdList)
-      throws MetaException, TException, NoSuchObjectException{
-    return getTable(getDefaultCatalog(conf), dbname, name,
-        txnId, validWriteIdList);
-  };
-
-  @Override
   public Table getTable(String catName, String dbName, String tableName) throws TException {
     GetTableRequest req = new GetTableRequest(dbName, tableName);
     req.setCatName(catName);
@@ -1787,11 +1774,10 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   @Override
   public Table getTable(String catName, String dbName, String tableName,
-    long txnId, String validWriteIdList) throws TException {
+    String validWriteIdList) throws TException {
     GetTableRequest req = new GetTableRequest(dbName, tableName);
     req.setCatName(catName);
     req.setCapabilities(version);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(validWriteIdList);
     Table t = client.get_table_req(req).getTable();
     return deepCopy(filterHook.filterTable(t));
@@ -2027,12 +2013,11 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
 
   @Override
   public void alter_partition(String dbName, String tblName, Partition newPart,
-      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException {
     AlterPartitionsRequest req = new AlterPartitionsRequest(
         dbName, tblName, Lists.newArrayList(newPart));
     req.setEnvironmentContext(environmentContext);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     client.alter_partitions_req(req);
   }
@@ -2042,23 +2027,23 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
       throws TException {
     alter_partitions(
-        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1);
+        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), null, -1);
   }
 
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
                                EnvironmentContext environmentContext) throws TException {
     alter_partitions(
-        getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null, -1);
+        getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, null, -1);
   }
 
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
                                EnvironmentContext environmentContext,
-                               long txnId, String writeIdList, long writeId)
+                               String writeIdList, long writeId)
       throws InvalidOperationException, MetaException, TException {
     alter_partitions(getDefaultCatalog(conf),
-        dbName, tblName, newParts, environmentContext, txnId, writeIdList, writeId);
+        dbName, tblName, newParts, environmentContext, writeIdList, writeId);
 
   }
 
@@ -2066,14 +2051,13 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   public void alter_partitions(String catName, String dbName, String tblName,
                                List<Partition> newParts,
                                EnvironmentContext environmentContext,
-                               long txnId, String writeIdList, long writeId) throws TException {
+                               String writeIdList, long writeId) throws TException {
     AlterPartitionsRequest req = new AlterPartitionsRequest();
     req.setCatName(catName);
     req.setDbName(dbName);
     req.setTableName(tblName);
     req.setPartitions(newParts);
     req.setEnvironmentContext(environmentContext);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     req.setWriteId(writeId);
     client.alter_partitions_req(req);
@@ -2218,21 +2202,18 @@ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
   @Override
   public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
                                                             List<String> colNames,
-                                                            long txnId,
                                                             String validWriteIdList) throws TException {
     return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames,
-        txnId, validWriteIdList);
+        validWriteIdList);
   }
 
   @Override
   public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
                                                             String tableName,
                                                             List<String> colNames,
-                                                            long txnId,
                                                             String validWriteIdList) throws TException {
     TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
     rqst.setCatName(catName);
-    rqst.setTxnId(txnId);
     rqst.setValidWriteIdList(validWriteIdList);
     return client.get_table_statistics_req(rqst).getTableStats();
   }
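After this change a caller passes only the serialized ValidWriteIdList; a minimal calling sketch (illustrative only: `client` is assumed to be an already-connected IMetaStoreClient, `writeIds` a ValidWriteIdList obtained for the current query, "hive" is the default catalog name, the database/table/column names are placeholders, and the surrounding method is assumed to declare TException):

    Table t = client.getTable("hive", "default", "acid_tbl", writeIds.writeToString());
    List<ColumnStatisticsObj> colStats = client.getTableColumnStatistics(
        "default", "acid_tbl", Arrays.asList("id", "name"), writeIds.writeToString());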

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 3a65f77..cee3572 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -91,7 +91,7 @@ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
       throws MetaException, NoSuchObjectException;
 
   Table get_table_core(final String catName, final String dbname,
-                       final String name, final long txnId,
+                       final String name,
                        final String writeIdList)
       throws MetaException, NoSuchObjectException;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 70be8d8..91405b9 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -624,7 +624,7 @@ public interface IMetaStoreClient {
   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
 
   void truncateTable(String dbName, String tableName, List<String> partNames,
-      long txnId, String validWriteIds, long writeId) throws TException;
+      String validWriteIds, long writeId) throws TException;
 
   /**
    * Truncate the table/partitions in the DEFAULT database.
@@ -717,10 +717,6 @@ public interface IMetaStoreClient {
   Table getTable(String dbName, String tableName) throws MetaException,
       TException, NoSuchObjectException;
 
-  Table getTable(String dbName, String tableName,
-                 long txnId, String validWriteIdList)
-      throws MetaException, TException, NoSuchObjectException;
-
   /**
    * Get a table object.
    * @param catName catalog the table is in.
@@ -733,7 +729,7 @@ public interface IMetaStoreClient {
   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
 
   Table getTable(String catName, String dbName, String tableName,
-                        long txnId, String validWriteIdList) throws TException;
+                        String validWriteIdList) throws TException;
   /**
    * Get tables as objects (rather than just fetching their names).  This is more expensive and
    * should only be used if you actually need all the information about the tables.
@@ -1650,7 +1646,7 @@ public interface IMetaStoreClient {
       TException;
 
   void alter_table(String catName, String databaseName, String tblName, Table table,
-      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
+      EnvironmentContext environmentContext, String validWriteIdList)
           throws InvalidOperationException, MetaException, TException;
   /**
    * Create a new database.
@@ -2084,7 +2080,7 @@ public interface IMetaStoreClient {
 
 
   void alter_partition(String dbName, String tblName, Partition newPart,
-      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException;
 
   /**
@@ -2151,7 +2147,7 @@ public interface IMetaStoreClient {
 
   void alter_partitions(String dbName, String tblName, List<Partition> newParts,
                         EnvironmentContext environmentContext,
-                        long txnId, String writeIdList, long writeId)
+                        String writeIdList, long writeId)
       throws InvalidOperationException, MetaException, TException;
 
   /**
@@ -2174,7 +2170,7 @@ public interface IMetaStoreClient {
   default void alter_partitions(String catName, String dbName, String tblName,
                                 List<Partition> newParts)
       throws InvalidOperationException, MetaException, TException {
-    alter_partitions(catName, dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1);
+    alter_partitions(catName, dbName, tblName, newParts, new EnvironmentContext(),  null, -1);
   }
 
   /**
@@ -2196,7 +2192,7 @@ public interface IMetaStoreClient {
    */
   void alter_partitions(String catName, String dbName, String tblName, List<Partition> newParts,
                         EnvironmentContext environmentContext,
-                        long txnId, String writeIdList, long writeId)
+                        String writeIdList, long writeId)
       throws InvalidOperationException, MetaException, TException;
 
   /**
@@ -2241,7 +2237,7 @@ public interface IMetaStoreClient {
    *          if error in communicating with metastore server
    */
   void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
-                       Partition newPart, long txnId, String validWriteIds)
+                       Partition newPart, String validWriteIds)
       throws InvalidOperationException, MetaException, TException;
 
   /**
@@ -2380,7 +2376,6 @@ public interface IMetaStoreClient {
 
   List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
                                                      List<String> colNames,
-                                                     long txnId,
                                                      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException;
 
@@ -2403,7 +2398,6 @@ public interface IMetaStoreClient {
 
   List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName, String tableName,
                                                      List<String> colNames,
-                                                     long txnId,
                                                      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException;
   /**
@@ -2424,7 +2418,7 @@ public interface IMetaStoreClient {
 
   Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(String dbName,
       String tableName,  List<String> partNames, List<String> colNames,
-      long txnId, String validWriteIdList)
+      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException;
 
   /**
@@ -2447,7 +2441,7 @@ public interface IMetaStoreClient {
   Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String catName, String dbName, String tableName,
       List<String> partNames, List<String> colNames,
-      long txnId, String validWriteIdList)
+      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException;
   /**
    * Delete partition level column statistics given dbName, tableName, partName and colName, or
@@ -3300,7 +3294,7 @@ public interface IMetaStoreClient {
 
   AggrStats getAggrColStatsFor(String dbName, String tblName,
       List<String> colNames, List<String> partName,
-      long txnId, String writeIdList)  throws NoSuchObjectException, MetaException, TException;
+      String writeIdList)  throws NoSuchObjectException, MetaException, TException;
 
   /**
    * Get aggregated column stats for a set of partitions.
@@ -3320,7 +3314,7 @@ public interface IMetaStoreClient {
 
   AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
                                List<String> colNames, List<String> partNames,
-                               long txnId, String writeIdList)
+                               String writeIdList)
       throws NoSuchObjectException, MetaException, TException;
   /**
    * Set table or partition column statistics.

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 9eb8424..b319e68 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -1412,12 +1412,12 @@ public class ObjectStore implements RawStore, Configurable {
   public Table
   getTable(String catName, String dbName, String tableName)
       throws MetaException {
-    return getTable(catName, dbName, tableName, -1, null);
+    return getTable(catName, dbName, tableName, null);
   }
 
   @Override
   public Table getTable(String catName, String dbName, String tableName,
-                        long txnId, String writeIdList)
+                        String writeIdList)
       throws MetaException {
     boolean commited = false;
     Table tbl = null;
@@ -1441,7 +1441,7 @@ public class ObjectStore implements RawStore, Configurable {
           StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
           LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
         } else if (isTxn && tbl.getPartitionKeysSize() == 0) {
-          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, false)) {
+          if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) {
             tbl.setIsStatsCompliant(true);
           } else {
             tbl.setIsStatsCompliant(false);
@@ -2427,13 +2427,13 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
       List<String> part_vals) throws NoSuchObjectException, MetaException {
-    return getPartition(catName, dbName, tableName, part_vals, -1, null);
+    return getPartition(catName, dbName, tableName, part_vals, null);
   }
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
                                 List<String> part_vals,
-                                long txnId, String writeIdList)
+                                String writeIdList)
       throws NoSuchObjectException, MetaException {
     openTransaction();
     MTable table = this.getMTable(catName, dbName, tableName);
@@ -2453,7 +2453,7 @@ public class ObjectStore implements RawStore, Configurable {
         StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
         LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
       } else if (writeIdList != null) {
-        if (isCurrentStatsValidForTheQuery(part, mpart.getWriteId(), txnId, writeIdList, false)) {
+        if (isCurrentStatsValidForTheQuery(part, mpart.getWriteId(), writeIdList, false)) {
           part.setIsStatsCompliant(true);
         } else {
           part.setIsStatsCompliant(false);
@@ -3022,7 +3022,7 @@ public class ObjectStore implements RawStore, Configurable {
         TableName.getQualified(catName, dbName, tableName), filter, cols);
     List<String> partitionNames = null;
     List<Partition> partitions = null;
-    Table tbl = getTable(catName, dbName, tableName, -1, null);
+    Table tbl = getTable(catName, dbName, tableName, null);
     try {
       // Get partitions by name - ascending or descending
       partitionNames = getPartitionNamesByFilter(catName, dbName, tableName, filter, ascending,
@@ -3156,7 +3156,7 @@ public class ObjectStore implements RawStore, Configurable {
         partValuesSelect.append("DISTINCT ");
       }
       List<FieldSchema> partitionKeys =
-          getTable(catName, dbName, tableName, -1, null).getPartitionKeys();
+          getTable(catName, dbName, tableName, null).getPartitionKeys();
       for (FieldSchema key : cols) {
         partValuesSelect.append(extractPartitionKey(key, partitionKeys)).append(", ");
       }
@@ -3238,7 +3238,7 @@ public class ObjectStore implements RawStore, Configurable {
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tableName = normalizeIdentifier(tableName);
-    Table table = getTable(catName, dbName, tableName, -1, null);
+    Table table = getTable(catName, dbName, tableName, null);
     if (table == null) {
       throw new NoSuchObjectException(TableName.getQualified(catName, dbName, tableName)
           + " table not found");
@@ -4092,7 +4092,7 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public void alterTable(String catName, String dbname, String name, Table newTable,
-      long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+      String queryValidWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     boolean registerCreationSignature = false;
     try {
@@ -4151,7 +4151,7 @@ public class ObjectStore implements RawStore, Configurable {
           StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
         } else if (queryValidWriteIds != null && (!isToTxn || newTable.getWriteId() > 0)) {
           // Check concurrent INSERT case and set false to the flag.
-          if (!isCurrentStatsValidForTheQuery(oldt, queryTxnId, queryValidWriteIds, true)) {
+          if (!isCurrentStatsValidForTheQuery(oldt, queryValidWriteIds, true)) {
             StatsSetupConst.setBasicStatsState(oldt.getParameters(), StatsSetupConst.FALSE);
             LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table " +
                     dbname + "." + name + ". will be made persistent.");
@@ -4231,7 +4231,7 @@ public class ObjectStore implements RawStore, Configurable {
    * @throws MetaException
    */
   private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, String name,
-    List<String> part_vals, Partition newPart, long queryTxnId, String queryValidWriteIds)
+    List<String> part_vals, Partition newPart, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
     catName = normalizeIdentifier(catName);
     name = normalizeIdentifier(name);
@@ -4276,7 +4276,7 @@ public class ObjectStore implements RawStore, Configurable {
         StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
       } else if (queryValidWriteIds != null && newPart.getWriteId() > 0) {
         // Check concurrent INSERT case and set false to the flag.
-        if (!isCurrentStatsValidForTheQuery(oldp, queryTxnId, queryValidWriteIds, true)) {
+        if (!isCurrentStatsValidForTheQuery(oldp, queryValidWriteIds, true)) {
           StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
           LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " +
                   dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent.");
@@ -4290,7 +4290,7 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public void alterPartition(String catName, String dbname, String name, List<String> part_vals,
-      Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+      Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     Throwable e = null;
     try {
@@ -4299,7 +4299,7 @@ public class ObjectStore implements RawStore, Configurable {
         LOG.warn("Alter partitions with write ID called without transaction information");
       }
       MColumnDescriptor oldCd = alterPartitionNoTxn(
-          catName, dbname, name, part_vals, newPart, queryTxnId, queryValidWriteIds);
+          catName, dbname, name, part_vals, newPart, queryValidWriteIds);
       removeUnusedColumnDescriptor(oldCd);
       // commit the changes
       success = commitTransaction();
@@ -4322,7 +4322,7 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public void alterPartitions(String catName, String dbname, String name,
                               List<List<String>> part_vals, List<Partition> newParts,
-                              long writeId, long queryTxnId, String queryWriteIdList)
+                              long writeId, String queryWriteIdList)
                                   throws InvalidObjectException, MetaException {
     boolean success = false;
     Exception e = null;
@@ -4336,7 +4336,7 @@ public class ObjectStore implements RawStore, Configurable {
           tmpPart.setWriteId(writeId);
         }
         MColumnDescriptor oldCd = alterPartitionNoTxn(
-            catName, dbname, name, tmpPartVals, tmpPart, queryTxnId, queryWriteIdList);
+            catName, dbname, name, tmpPartVals, tmpPart, queryWriteIdList);
         if (oldCd != null) {
           oldCds.add(oldCd);
         }
@@ -6215,7 +6215,7 @@ public class ObjectStore implements RawStore, Configurable {
             boolean found = false;
             Table tabObj =
                 this.getTable(catName, hiveObject.getDbName(),
-                     hiveObject.getObjectName(), -1, null);
+                     hiveObject.getObjectName(), null);
             String partName = null;
             if (hiveObject.getPartValues() != null) {
               partName = Warehouse.makePartName(tabObj.getPartitionKeys(), hiveObject.getPartValues());
@@ -6249,7 +6249,7 @@ public class ObjectStore implements RawStore, Configurable {
           } else if (hiveObject.getObjectType() == HiveObjectType.COLUMN) {
 
             Table tabObj = this.getTable(catName, hiveObject.getDbName(), hiveObject
-                .getObjectName(), -1, null);
+                .getObjectName(), null);
             String partName = null;
             if (hiveObject.getPartValues() != null) {
               partName = Warehouse.makePartName(tabObj.getPartitionKeys(),
@@ -7771,7 +7771,7 @@ public class ObjectStore implements RawStore, Configurable {
       query
           .declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3, int t4," +
               "java.lang.String t5");
-      Table tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
+      Table tbl = getTable(catName, dbName, tblName, null); // Make sure dbName and tblName are valid.
       if (null == tbl) {
         throw new UnknownTableException("Table: " + tblName + " is not found.");
       }
@@ -7797,7 +7797,7 @@ public class ObjectStore implements RawStore, Configurable {
     Table tbl = null;
     try{
     openTransaction();
-    tbl = getTable(catName, dbName, tblName, -1, null); // Make sure dbName and tblName are valid.
+    tbl = getTable(catName, dbName, tblName, null); // Make sure dbName and tblName are valid.
     if(null == tbl) {
       throw new UnknownTableException("Table: "+ tblName + " is not found.");
     }
@@ -8409,7 +8409,7 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public boolean updateTableColumnStatistics(ColumnStatistics colStats,
-      long txnId, String validWriteIds, long writeId)
+      String validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
 
@@ -8455,7 +8455,7 @@ public class ObjectStore implements RawStore, Configurable {
           if (errorMsg != null) {
             throw new MetaException(errorMsg);
           }
-          if (!isCurrentStatsValidForTheQuery(oldt, txnId, validWriteIds, true)) {
+          if (!isCurrentStatsValidForTheQuery(oldt, validWriteIds, true)) {
             // Make sure we set the flag to invalid regardless of the current value.
             StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
             LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the table "
@@ -8503,7 +8503,7 @@ public class ObjectStore implements RawStore, Configurable {
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
-      long txnId, String validWriteIds, long writeId)
+      String validWriteIds, long writeId)
           throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
 
@@ -8549,7 +8549,7 @@ public class ObjectStore implements RawStore, Configurable {
           if (errorMsg != null) {
             throw new MetaException(errorMsg);
           }
-          if (!isCurrentStatsValidForTheQuery(mPartition, txnId, validWriteIds, true)) {
+          if (!isCurrentStatsValidForTheQuery(mPartition, validWriteIds, true)) {
             // Make sure we set the flag to invalid regardless of the current value.
             StatsSetupConst.setBasicStatsState(newParams, StatsSetupConst.FALSE);
             LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition "
@@ -8660,7 +8660,6 @@ public class ObjectStore implements RawStore, Configurable {
       String dbName,
       String tableName,
       List<String> colNames,
-      long txnId,
       String writeIdList) throws MetaException, NoSuchObjectException {
     // If the current stats in the metastore doesn't comply with
     // the isolation level of the query, set No to the compliance flag.
@@ -8668,7 +8667,7 @@ public class ObjectStore implements RawStore, Configurable {
     if (writeIdList != null) {
       MTable table = this.getMTable(catName, dbName, tableName);
       isCompliant = !TxnUtils.isTransactionalTable(table.getParameters())
-        || (areTxnStatsSupported && isCurrentStatsValidForTheQuery(table, txnId, writeIdList, false));
+        || (areTxnStatsSupported && isCurrentStatsValidForTheQuery(table, writeIdList, false));
     }
     ColumnStatistics stats = getTableColumnStatisticsInternal(
         catName, dbName, tableName, colNames, true, true);
@@ -8730,7 +8729,7 @@ public class ObjectStore implements RawStore, Configurable {
   public List<ColumnStatistics> getPartitionColumnStatistics(
       String catName, String dbName, String tableName,
       List<String> partNames, List<String> colNames,
-      long txnId, String writeIdList)
+      String writeIdList)
       throws MetaException, NoSuchObjectException {
     if (partNames == null && partNames.isEmpty()) {
       return null;
@@ -8748,11 +8747,11 @@ public class ObjectStore implements RawStore, Configurable {
           MPartition mpart = getMPartition(catName, dbName, tableName,
               Warehouse.getPartValuesFromPartName(cs.getStatsDesc().getPartName()));
           if (mpart == null
-              || !isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, false)) {
+              || !isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) {
             if (mpart != null) {
-              LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} "
-                + "(write ID {}) are not valid for current query ({} {})", dbName, tableName,
-                mpart.getPartitionName(), mpart.getWriteId(), txnId, writeIdList);
+              LOG.debug("The current metastore transactional partition column statistics for {}.{}.{} "
+                + "(write ID {}) are not valid for the current query ({})", dbName, tableName,
+                mpart.getPartitionName(), mpart.getWriteId(), writeIdList);
             }
             cs.setIsStatsCompliant(false);
           } else {
@@ -8815,7 +8814,7 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       final List<String> partNames, final List<String> colNames,
-      long txnId, String writeIdList) throws MetaException, NoSuchObjectException {
+      String writeIdList) throws MetaException, NoSuchObjectException {
     // If the current stats in the metastore don't comply with
     // the isolation level of the query, return null.
     if (writeIdList != null) {
@@ -8836,7 +8835,7 @@ public class ObjectStore implements RawStore, Configurable {
       for (String partName : partNames) {
         MPartition mpart = getMPartition(
             catName, dbName, tblName, Warehouse.getPartValuesFromPartName(partName));
-        if (!isCurrentStatsValidForTheQuery(mpart, txnId, writeIdList, false)) {
+        if (!isCurrentStatsValidForTheQuery(mpart, writeIdList, false)) {
           LOG.debug("The current metastore transactional partition column statistics " +
                   "for " + dbName + "." + tblName + "." + mpart.getPartitionName() + " are not valid " +
                   "for the current query.");
@@ -12419,10 +12418,10 @@ public class ObjectStore implements RawStore, Configurable {
    * @param queryValidWriteIdList  valid writeId list of the query
    * @Precondition   "tbl" should be retrieved from the TBLS table.
    */
-  private boolean isCurrentStatsValidForTheQuery(MTable tbl, long queryTxnId, String queryValidWriteIdList,
+  private boolean isCurrentStatsValidForTheQuery(MTable tbl, String queryValidWriteIdList,
       boolean isCompleteStatsWriter) throws MetaException {
     return isCurrentStatsValidForTheQuery(conf, tbl.getDatabase().getName(), tbl.getTableName(),
-        tbl.getParameters(), tbl.getWriteId(), queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
+        tbl.getParameters(), tbl.getWriteId(), queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   /**
@@ -12439,30 +12438,30 @@ public class ObjectStore implements RawStore, Configurable {
    * @param queryValidWriteIdList  valid writeId list of the query
    * @Precondition   "part" should be retrieved from the PARTITIONS table.
    */
-  private boolean isCurrentStatsValidForTheQuery(MPartition part, long queryTxnId,
+  private boolean isCurrentStatsValidForTheQuery(MPartition part,
       String queryValidWriteIdList, boolean isCompleteStatsWriter)
       throws MetaException {
     return isCurrentStatsValidForTheQuery(conf, part.getTable().getDatabase().getName(),
         part.getTable().getTableName(), part.getParameters(), part.getWriteId(),
-        queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
+        queryValidWriteIdList, isCompleteStatsWriter);
   }
 
-  private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId, long queryTxnId,
+  private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId,
       String queryValidWriteIdList, boolean isCompleteStatsWriter)
       throws MetaException {
     return isCurrentStatsValidForTheQuery(conf, part.getDbName(), part.getTableName(),
-        part.getParameters(), partWriteId, queryTxnId, queryValidWriteIdList, isCompleteStatsWriter);
+        part.getParameters(), partWriteId, queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   // TODO: move to somewhere else
   public static boolean isCurrentStatsValidForTheQuery(Configuration conf, String dbName,
-      String tblName, Map<String, String> statsParams, long statsWriteId, long queryTxnId,
+      String tblName, Map<String, String> statsParams, long statsWriteId,
       String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException {
 
     // Note: can be changed to debug/info to verify the calls.
     // TODO## change this to debug when merging
-    LOG.info("isCurrentStatsValidForTheQuery with stats write ID {}; query {}, {}; writer: {} params {}",
-        statsWriteId, queryTxnId, queryValidWriteIdList, isCompleteStatsWriter, statsParams);
+    LOG.info("isCurrentStatsValidForTheQuery with stats write ID {}; query {}; writer: {} params {}",
+        statsWriteId, queryValidWriteIdList, isCompleteStatsWriter, statsParams);
     // Return true since the stats do not seem to be transactional.
     if (statsWriteId < 1) {
       return true;
@@ -12495,15 +12494,6 @@ public class ObjectStore implements RawStore, Configurable {
       }
     }
 
-    if (queryTxnId < 1) {
-      return false; // The caller is outside of a txn; no need to check the same-txn case.
-    }
-
-    // This assumes that all writes within the same txn are sequential and can see each other.
-    // TODO## Not clear if we need this check; each next write should have the previous
-    //        one in its writeIdList; verify w/Eugene.
-    long statsTxnId = HiveMetaStore.HMSHandler.getMsThreadTxnHandler(conf).getTxnIdForWriteId(
-        dbName, tblName, statsWriteId);
-    return (statsTxnId == queryTxnId);
+    return false;
   }
 }
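
The removed block above also drops the same-transaction fallback, so the surviving check keys off the query's ValidWriteIdList string alone. A minimal standalone sketch of that decision, assuming ValidReaderWriteIdList can be parsed from its string form the way the metastore does elsewhere (the class and method names below are illustrative, and the patch's full method carries extra leeway for complete-stats writers that is not shown here):

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;

public class StatsSnapshotCheckSketch {
  // Sketch of the writeIdList-only validity test: stats written outside ACID
  // (writeId < 1) are always usable; otherwise the stats writer's writeId must
  // be committed and visible in the reader's snapshot.
  static boolean statsUsableForQuery(long statsWriteId, String queryValidWriteIdList) {
    if (statsWriteId < 1) {
      return true;   // stats do not appear to be transactional
    }
    if (queryValidWriteIdList == null) {
      return false;  // no snapshot information; stay conservative
    }
    ValidReaderWriteIdList snapshot = new ValidReaderWriteIdList(queryValidWriteIdList);
    return snapshot.isWriteIdValid(statsWriteId);
  }
}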

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 95e8445..46082a5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -221,7 +221,7 @@ public interface RawStore extends Configurable {
    * @throws MetaException something went wrong in the RDBMS
    */
   Table getTable(String catalogName, String dbName, String tableName,
-                 long txnId, String writeIdList) throws MetaException;
+                 String writeIdList) throws MetaException;
 
   /**
    * Add a partition.
@@ -289,7 +289,7 @@ public interface RawStore extends Configurable {
    */
   Partition getPartition(String catName, String dbName, String tableName,
                          List<String> part_vals,
-                         long txnId, String writeIdList)
+                         String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -362,7 +362,7 @@ public interface RawStore extends Configurable {
    * @throws MetaException something went wrong, usually in the RDBMS or storage.
    */
   void alterTable(String catName, String dbname, String name, Table newTable,
-      long queryTxnId, String queryValidWriteIds)
+      String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
   /**
@@ -503,7 +503,7 @@ public interface RawStore extends Configurable {
    * @throws MetaException error accessing the RDBMS.
    */
   void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
-      Partition new_part, long queryTxnId, String queryValidWriteIds)
+      Partition new_part, String queryValidWriteIds)
           throws InvalidObjectException, MetaException;
 
   /**
@@ -524,7 +524,7 @@ public interface RawStore extends Configurable {
    */
   void alterPartitions(String catName, String db_name, String tbl_name,
       List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
-      long queryTxnId, String queryValidWriteIds)
+      String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
   /**
@@ -864,7 +864,7 @@ public interface RawStore extends Configurable {
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
    */
-  boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+  boolean updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
 
   /** Persists the given column statistics object to the metastore
@@ -878,7 +878,7 @@ public interface RawStore extends Configurable {
    * @throws TException
    */
   boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
-     List<String> partVals, long txnId, String validWriteIds, long writeId)
+     List<String> partVals, String validWriteIds, long writeId)
      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
 
   /**
@@ -912,7 +912,7 @@ public interface RawStore extends Configurable {
    */
   ColumnStatistics getTableColumnStatistics(
     String catName, String dbName, String tableName,
-    List<String> colName, long txnId, String writeIdList)
+    List<String> colName, String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -946,7 +946,7 @@ public interface RawStore extends Configurable {
   List<ColumnStatistics> getPartitionColumnStatistics(
       String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames,
-      long txnId, String writeIdList)
+      String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
@@ -1208,7 +1208,7 @@ public interface RawStore extends Configurable {
    */
   AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
     List<String> partNames, List<String> colNames,
-    long txnId, String writeIdList)
+    String writeIdList)
       throws MetaException, NoSuchObjectException;
 
   /**
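
Since every RawStore method above now names its snapshot purely by the ValidWriteIdList string, a caller-side sketch of the updated getTable contract looks roughly like the following (the helper and its argument values are hypothetical, not part of the patch):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

public class SnapshotReadSketch {
  // Hypothetical helper: a null writeIdList keeps the legacy, non-snapshot
  // behaviour, while a writeIdList string (for example something like
  // "db.tbl:5:9223372036854775807::") pins the read to that snapshot so the
  // store can flag stale transactional stats.
  static Table readTable(RawStore store, String catName, String dbName, String tblName,
      String validWriteIdList) throws MetaException {
    return store.getTable(catName, dbName, tblName, validWriteIdList);
  }
}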

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 9bee0db..dd705a5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -825,20 +825,20 @@ public class CachedStore implements RawStore, Configurable {
 
   @Override
   public Table getTable(String catName, String dbName, String tblName) throws MetaException {
-    return getTable(catName, dbName, tblName, -1, null);
+    return getTable(catName, dbName, tblName, null);
   }
 
   // TODO: if writeIdList is not null, check isolation level compliance for SVS,
   // possibly with getTableFromCache() with table snapshot in cache.
   @Override
   public Table getTable(String catName, String dbName, String tblName,
-                        long txnId, String writeIdList)
+                        String writeIdList)
       throws MetaException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getTable(catName, dbName, tblName, txnId,writeIdList);
+      return rawStore.getTable(catName, dbName, tblName, writeIdList);
     }
     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
     if (tbl == null || writeIdList != null) {
@@ -847,7 +847,7 @@ public class CachedStore implements RawStore, Configurable {
       // let's move this table to the top of tblNamesBeingPrewarmed stack,
       // so that it gets loaded to the cache faster and is available for subsequent requests
       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
-      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+      return rawStore.getTable(catName, dbName, tblName, writeIdList);
     }
     if (tbl != null) {
       tbl.unsetPrivileges();
@@ -910,26 +910,26 @@ public class CachedStore implements RawStore, Configurable {
   @Override
   public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
       throws MetaException, NoSuchObjectException {
-    return getPartition(catName, dbName, tblName, part_vals, -1, null);
+    return getPartition(catName, dbName, tblName, part_vals, null);
   }
 
   // TODO: the same as getTable()
   @Override
   public Partition getPartition(String catName, String dbName, String tblName,
-                                List<String> part_vals, long txnId, String writeIdList)
+                                List<String> part_vals, String writeIdList)
       throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
       return rawStore.getPartition(
-          catName, dbName, tblName, part_vals, txnId, writeIdList);
+          catName, dbName, tblName, part_vals, writeIdList);
     }
     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
     if (part == null || writeIdList != null) {
       // The partition is not in the cache, or a writeIdList snapshot was requested; use the raw store
       return rawStore.getPartition(
-          catName, dbName, tblName, part_vals, txnId, writeIdList);
+          catName, dbName, tblName, part_vals, writeIdList);
     }
     return part;
   }
@@ -1011,8 +1011,8 @@ public class CachedStore implements RawStore, Configurable {
 
   @Override
   public void alterTable(String catName, String dbName, String tblName, Table newTable,
-      long txnId, String validWriteIds) throws InvalidObjectException, MetaException {
-    rawStore.alterTable(catName, dbName, tblName, newTable, txnId, validWriteIds);
+      String validWriteIds) throws InvalidObjectException, MetaException {
+    rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
@@ -1162,9 +1162,9 @@ public class CachedStore implements RawStore, Configurable {
 
   @Override
   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
-                             Partition newPart, long queryTxnId, String queryValidWriteIds)
+                             Partition newPart, String queryValidWriteIds)
                                  throws InvalidObjectException, MetaException {
-    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
@@ -1177,10 +1177,10 @@ public class CachedStore implements RawStore, Configurable {
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
                               List<List<String>> partValsList, List<Partition> newParts,
-                              long writeId, long txnId, String validWriteIds)
+                              long writeId, String validWriteIds)
       throws InvalidObjectException, MetaException {
     rawStore.alterPartitions(
-        catName, dbName, tblName, partValsList, newParts, writeId, txnId, validWriteIds);
+        catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
@@ -1599,9 +1599,9 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+  public boolean updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
+    boolean succ = rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId);
     if (succ) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
@@ -1631,27 +1631,27 @@ public class CachedStore implements RawStore, Configurable {
   @Override
   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
       List<String> colNames) throws MetaException, NoSuchObjectException {
-    return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
+    return getTableColumnStatistics(catName, dbName, tblName, colNames, null);
   }
 
   // TODO: the same as getTable()
   @Override
   public ColumnStatistics getTableColumnStatistics(
       String catName, String dbName, String tblName, List<String> colNames,
-      long txnId, String writeIdList)
+      String writeIdList)
       throws MetaException, NoSuchObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
       return rawStore.getTableColumnStatistics(
-          catName, dbName, tblName, colNames, txnId, writeIdList);
+          catName, dbName, tblName, colNames, writeIdList);
     }
     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
     if (table == null || writeIdList != null) {
       // The table is not in the cache, or a writeIdList snapshot was requested; use the raw store
       return rawStore.getTableColumnStatistics(
-          catName, dbName, tblName, colNames, txnId, writeIdList);
+          catName, dbName, tblName, colNames, writeIdList);
     }
     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
     List<ColumnStatisticsObj> colStatObjs =
@@ -1678,9 +1678,9 @@ public class CachedStore implements RawStore, Configurable {
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
-      long txnId, String validWriteIds, long writeId)
+      String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, txnId, validWriteIds, writeId);
+    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId);
     if (succ) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
@@ -1712,10 +1712,10 @@ public class CachedStore implements RawStore, Configurable {
   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(
       String catName, String dbName, String tblName, List<String> partNames,
-      List<String> colNames, long txnId, String writeIdList)
+      List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     return rawStore.getPartitionColumnStatistics(
-        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+        catName, dbName, tblName, partNames, colNames, writeIdList);
   }
 
   @Override
@@ -1739,14 +1739,14 @@ public class CachedStore implements RawStore, Configurable {
   @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
       List<String> colNames) throws MetaException, NoSuchObjectException {
-    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
+    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null);
   }
 
   @Override
   // TODO: the same as getTable() for transactional stats.
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
                                       List<String> partNames, List<String> colNames,
-                                      long txnId, String writeIdList)
+                                      String writeIdList)
       throws MetaException, NoSuchObjectException {
     List<ColumnStatisticsObj> colStats;
     catName = normalizeIdentifier(catName);
@@ -1754,13 +1754,13 @@ public class CachedStore implements RawStore, Configurable {
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      rawStore.get_aggr_stats_for(
-          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+      return rawStore.get_aggr_stats_for(
+          catName, dbName, tblName, partNames, colNames, writeIdList);
     }
     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
     if (table == null || writeIdList != null) {
       // The table is not in the cache, or a writeIdList snapshot was requested; use the raw store
       return rawStore.get_aggr_stats_for(
-          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+          catName, dbName, tblName, partNames, colNames, writeIdList);
     }
     List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
     if (partNames.size() == allPartNames.size()) {
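
All of the CachedStore hunks follow the one pattern called out by the TODO comments above: the cache keeps no per-snapshot state yet, so any read that names a writeIdList is routed to the backing RawStore. Condensed into an illustrative fragment (sharedCache and rawStore are the fields of the class above; this is not literal patch code):

    // Cache miss, or the caller pinned a writeIdList snapshot: bypass the cache
    // and let the raw store apply the transactional-stats validity rules.
    Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
    if (tbl == null || writeIdList != null) {
      return rawStore.getTable(catName, dbName, tblName, writeIdList);
    }
    return tbl;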

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index a5e6918..fb14536 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -248,9 +248,9 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
+  public Table getTable(String catName, String dbName, String tableName, String writeIdList)
       throws MetaException {
-    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+    return objectStore.getTable(catName, dbName, tableName, writeIdList);
   }
 
   @Override
@@ -267,9 +267,9 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
-                                List<String> partVals, long txnId, String writeIdList)
+                                List<String> partVals, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+    return objectStore.getPartition(catName, dbName, tableName, partVals, writeIdList);
   }
 
   @Override
@@ -293,9 +293,9 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
 
   @Override
   public void alterTable(String catName, String dbName, String name, Table newTable,
-      long queryTxnId, String queryValidWriteIds)
+      String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
-    objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
+    objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
   }
 
   @Override
@@ -358,16 +358,16 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
 
   @Override
   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
-      Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
-    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+      Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
   }
 
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
       List<List<String>> partValsList, List<Partition> newParts,
-      long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+      long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
     objectStore.alterPartitions(
-        catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
+        catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds);
   }
 
   @Override
@@ -671,10 +671,10 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   @Override
   public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
                                                    String tableName, List<String> colNames,
-                                                   long txnId, String writeIdList)
+                                                   String writeIdList)
       throws MetaException, NoSuchObjectException {
     return objectStore.getTableColumnStatistics(
-        catName, dbName, tableName, colNames, txnId, writeIdList);
+        catName, dbName, tableName, colNames, writeIdList);
   }
 
   @Override
@@ -694,18 +694,18 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, long txnId, String validWriteIds, long writeId)
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.updateTableColumnStatistics(statsObj, txnId, validWriteIds, writeId);
+    return objectStore.updateTableColumnStatistics(statsObj, validWriteIds, writeId);
   }
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
-      List<String> partVals, long txnId, String validWriteIds, long writeId)
+      List<String> partVals, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
-    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, txnId, validWriteIds, writeId);
+    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, validWriteIds, writeId);
   }
 
   @Override
@@ -772,10 +772,10 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(
       String catName, String dbName, String tblName, List<String> partNames,
-      List<String> colNames, long txnId, String writeIdList)
+      List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     return objectStore.getPartitionColumnStatistics(
-             catName, dbName, tblName  , colNames, partNames, txnId, writeIdList);
+             catName, dbName, tblName, partNames, colNames, writeIdList);
   }
 
   @Override
@@ -850,7 +850,7 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   public AggrStats get_aggr_stats_for(String catName, String dbName,
                                       String tblName, List<String> partNames,
                                       List<String> colNames,
-                                      long txnId, String writeIdList)
+                                      String writeIdList)
       throws MetaException, NoSuchObjectException {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 8270f6a..52785a6 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -245,7 +245,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
 
   @Override
   public Table getTable(String catalogName, String dbName, String tableName,
-                        long txnid, String writeIdList) throws MetaException {
+                        String writeIdList) throws MetaException {
     return null;
   }
 
@@ -264,7 +264,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
-                                long txnid, String writeIdList)
+                                String writeIdList)
       throws MetaException, NoSuchObjectException {
     return null;
   }
@@ -290,7 +290,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public void alterTable(String catName, String dbname, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
+  public void alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
   }
 
@@ -359,13 +359,13 @@ public class DummyRawStoreForJdoConnection implements RawStore {
 
   @Override
   public void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
-      Partition new_part, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+      Partition new_part, String queryValidWriteIds) throws InvalidObjectException, MetaException {
   }
 
   @Override
   public void alterPartitions(String catName, String db_name, String tbl_name,
-                              List<List<String>> part_vals_list, List<Partition> new_parts,
-                              long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+      List<List<String>> part_vals_list, List<Partition> new_parts,
+      long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
   }
 
   @Override
@@ -720,7 +720,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   @Override
   public ColumnStatistics getTableColumnStatistics(
       String catName, String dbName, String tableName, List<String> colName,
-      long txnid, String writeIdList)
+      String writeIdList)
       throws MetaException, NoSuchObjectException {
     return null;
   }
@@ -744,14 +744,14 @@ public class DummyRawStoreForJdoConnection implements RawStore {
 
   @Override
   public boolean updateTableColumnStatistics(ColumnStatistics statsObj,
-      long txnId, String validWriteIds, long writeId)
+      String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
     return false;
   }
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals,
-      long txnId, String validWriteIds, long writeId)
+      String validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException {
     return false;
   }
@@ -779,7 +779,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   @Override
   public List<ColumnStatistics> getPartitionColumnStatistics(
       String catName, String dbName, String tblName, List<String> partNames,
-      List<String> colNames, long txnid, String writeIdList)
+      List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     return Collections.emptyList();
   }
@@ -850,7 +850,7 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   @Override
   public AggrStats get_aggr_stats_for(
       String catName, String dbName, String tblName, List<String> partNames,
-      List<String> colNames, long txnid, String writeIdList)
+      List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index bc04e06..34055d2 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -1428,17 +1428,6 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
     return fastpath ? t : deepCopy(filterHook.filterTable(t));
   }
 
-  @Override
-  public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
-      throws MetaException, TException, NoSuchObjectException {
-    GetTableRequest req = new GetTableRequest(dbName, tableName);
-    req.setCapabilities(version);
-    req.setTxnId(txnId);
-    req.setValidWriteIdList(validWriteIdList);
-    Table t = client.get_table_req(req).getTable();
-    return fastpath ? t : deepCopy(filterHook.filterTable(t));
-  }
-
   /** {@inheritDoc} */
   @Override
   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
@@ -1638,14 +1627,13 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
                                EnvironmentContext environmentContext,
-                               long txnId, String writeIdList, long writeId)
+                               String writeIdList, long writeId)
       throws InvalidOperationException, MetaException, TException {
     AlterPartitionsRequest req = new AlterPartitionsRequest();
     req.setDbName(dbName);
     req.setTableName(tblName);
     req.setPartitions(newParts);
     req.setEnvironmentContext(environmentContext);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     client.alter_partitions_req(req);
   }
@@ -1758,10 +1746,9 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   @Override
   public List<ColumnStatisticsObj> getTableColumnStatistics(
-      String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
+      String dbName, String tableName, List<String> colNames, String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
-    tsr.setTxnId(txnId);
     tsr.setValidWriteIdList(validWriteIdList);
 
     return client.get_table_statistics_req(tsr).getTableStats();
@@ -1779,10 +1766,9 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String dbName, String tableName, List<String> partNames,
-      List<String> colNames, long txnId, String validWriteIdList)
+      List<String> colNames, String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
-    psr.setTxnId(txnId);
     psr.setValidWriteIdList(validWriteIdList);
     return client.get_partitions_statistics_req(
         psr).getPartStats();
@@ -2651,14 +2637,13 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public AggrStats getAggrColStatsFor(
       String dbName, String tblName, List<String> colNames,
-      List<String> partName, long txnId, String writeIdList)
+      List<String> partName, String writeIdList)
       throws NoSuchObjectException, MetaException, TException {
     if (colNames.isEmpty() || partName.isEmpty()) {
       LOG.debug("Column list or partition name list is empty: short-circuiting stats eval on client side.");
       return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate
     }
     PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName);
-    req.setTxnId(txnId);
     req.setValidWriteIdList(writeIdList);
     return client.get_aggr_stats_for(req);
   }
@@ -3072,7 +3057,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   @Override
   public Table getTable(String catName, String dbName, String tableName,
-                        long txnId, String validWriteIdList) throws TException {
+                        String validWriteIdList) throws TException {
     throw new UnsupportedOperationException();
   }
 
@@ -3304,14 +3289,14 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   public void alter_partitions(String catName, String dbName, String tblName,
                                List<Partition> newParts,
                                EnvironmentContext environmentContext,
-                               long txnId, String writeIdList, long writeId) throws
+                               String writeIdList, long writeId) throws
       InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
 
   @Override
   public void renamePartition(String catName, String dbname, String tableName,
-      List<String> part_vals, Partition newPart, long txnId, String validWriteIds)
+      List<String> part_vals, Partition newPart, String validWriteIds)
           throws InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3339,7 +3324,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public List<ColumnStatisticsObj> getTableColumnStatistics(
       String catName, String dbName, String tableName, List<String> colNames,
-      long txnId, String validWriteIdList)
+      String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3357,7 +3342,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
       String catName, String dbName, String tableName, List<String> partNames,
-      List<String> colNames, long txnId, String validWriteIdList)
+      List<String> colNames, String validWriteIdList)
       throws NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3412,7 +3397,7 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
   @Override
   public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName,
                                       List<String> colNames, List<String> partNames,
-                                      long txnId, String writeIdList)
+                                      String writeIdList)
       throws NoSuchObjectException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
@@ -3525,21 +3510,21 @@ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoClos
 
   @Override
   public void alter_table(String catName, String databaseName, String tblName, Table table,
-      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
+      EnvironmentContext environmentContext, String validWriteIdList)
       throws InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
 
   @Override
   public void alter_partition(String dbName, String tblName, Partition newPart,
-      EnvironmentContext environmentContext, long txnId, String writeIdList)
+      EnvironmentContext environmentContext, String writeIdList)
       throws InvalidOperationException, MetaException, TException {
     throw new UnsupportedOperationException();
   }
 
   @Override
   public void truncateTable(String dbName, String tableName,
-      List<String> partNames, long txnId, String validWriteIds, long writeId)
+      List<String> partNames, String validWriteIds, long writeId)
       throws TException {
     throw new UnsupportedOperationException();
   }
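
On the client side the Thrift requests simply stop carrying a transaction id, leaving the ValidWriteIdList string as the only snapshot marker. A small sketch of the updated request construction, using only the constructor and setters visible in the diff above (the literal database, table, and column names are placeholders):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

public class TableStatsRequestSketch {
  // Builds a table-stats request the way the updated client methods do:
  // no setTxnId() call, just the ValidWriteIdList string for the snapshot.
  static TableStatsRequest buildRequest(String validWriteIdList) {
    TableStatsRequest tsr = new TableStatsRequest("sales_db", "orders", Arrays.asList("amount"));
    tsr.setValidWriteIdList(validWriteIdList);
    return tsr;
  }
}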

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
index c5977b2..481d1d2 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@ -136,9 +136,8 @@ public class InjectableBehaviourObjectStore extends ObjectStore {
   }
 
   @Override
-  public Table getTable(String catName, String dbName, String tableName,
-      long txnId, String writeIdList) throws MetaException {
-    return getTableModifier.apply(super.getTable(catName, dbName, tableName, txnId, writeIdList));
+  public Table getTable(String catName, String dbName, String tableName, String writeIdList) throws MetaException {
+    return getTableModifier.apply(super.getTable(catName, dbName, tableName, writeIdList));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index 533cabb..8816480 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@ -60,7 +60,7 @@ public class TestHiveAlterHandler {
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null);
   }
 
   @Test
@@ -85,7 +85,7 @@ public class TestHiveAlterHandler {
     RawStore msdb = Mockito.mock(RawStore.class);
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null);
     Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
     );
@@ -115,7 +115,7 @@ public class TestHiveAlterHandler {
         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
     HiveAlterHandler handler = new HiveAlterHandler();
     handler.setConf(conf);
-    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, null);
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 995271a..e53ad77 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -245,7 +245,7 @@ public class TestObjectStore {
     newTbl1.setOwner("role1");
     newTbl1.setOwnerType(PrincipalType.ROLE);
 
-    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null);
+    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, null);
     tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*");
     Assert.assertEquals(1, tables.size());
     Assert.assertEquals("new" + TABLE1, tables.get(0));
@@ -559,7 +559,7 @@ public class TestObjectStore {
         ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
         statsObjList.add(partStats);
 
-        objectStore.updatePartitionColumnStatistics(stats, part.getValues(), -1, null, -1);
+        objectStore.updatePartitionColumnStatistics(stats, part.getValues(), null, -1);
       }
     }
     if (withPrivileges) {

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 01a8f81..36f91eb 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -175,7 +175,7 @@ public class TestOldSchema {
       data.setLongStats(dcsd);
       obj.setStatsData(data);
       cs.addToStatsObj(obj);
-      store.updatePartitionColumnStatistics(cs, partVal, -1, null, -1);
+      store.updatePartitionColumnStatistics(cs, partVal, null, -1);
 
     }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index e4854f9..bb20d9f 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -232,7 +232,7 @@ public class TestCachedStore {
     tblOwner = "role1";
     tbl.setOwner(tblOwner);
     tbl.setOwnerType(PrincipalType.ROLE);
-    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, -1, null);
+    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, null);
     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
 
     Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner());
@@ -338,7 +338,7 @@ public class TestCachedStore {
     Partition ptn1Atl =
         new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams);
     ptn1Atl.setCatName(DEFAULT_CATALOG_NAME);
-    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl, -1, null);
+    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl, null);
     ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
 
     // Drop an existing partition ("bbb") via ObjectStore
@@ -455,7 +455,7 @@ public class TestCachedStore {
     stats.setStatsObj(colStatObjs);
 
     // Save to DB
-    objectStore.updateTableColumnStatistics(stats, -1, null, -1);
+    objectStore.updateTableColumnStatistics(stats, null, -1);
 
     // Prewarm CachedStore
     CachedStore.setCachePrewarmedState(false);
@@ -720,8 +720,8 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);
@@ -790,10 +790,10 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
 
     longStats.setNumDVs(40);
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);
@@ -871,7 +871,7 @@ public class TestCachedStore {
     stats.setStatsDesc(statsDesc);
     stats.setStatsObj(colStatObjs);
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, null, -1);
 
     longStats.setNumDVs(40);
     hll = HyperLogLog.builder().build();
@@ -881,7 +881,7 @@ public class TestCachedStore {
     hll.addLong(5);
     longStats.setBitVectors(hll.serialize());
 
-    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, null, -1);
 
     List<String> colNames = new ArrayList<>();
     colNames.add(colName);


[29/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/651e7950
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/651e7950
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/651e7950

Branch: refs/heads/master
Commit: 651e7950977dd4e63da42648c38b03c3bf097e7f
Parents: f0a2fff 851c8ab
Author: sergey <se...@apache.org>
Authored: Thu Jul 19 14:44:10 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Thu Jul 19 14:44:10 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |     1 +
 RELEASE_NOTES.txt                               |     8 +
 .../org/apache/hive/beeline/BeeLineOpts.java    |    11 +-
 .../apache/hive/beeline/cli/TestHiveCli.java    |     2 -
 .../org/apache/hadoop/hive/conf/HiveConf.java   |     2 +
 .../HiveHBaseTableSnapshotInputFormat.java      |     4 +-
 hcatalog/core/pom.xml                           |     7 +
 hcatalog/webhcat/java-client/pom.xml            |     7 +
 hcatalog/webhcat/svr/pom.xml                    |     7 +
 itests/hcatalog-unit/pom.xml                    |     6 +
 itests/hive-blobstore/pom.xml                   |    13 +
 .../insert_overwrite_directory.q.out            |     2 +
 .../write_final_output_blobstore.q.out          |     8 +
 itests/hive-minikdc/pom.xml                     |    13 +
 itests/hive-unit-hadoop2/pom.xml                |     6 +
 itests/hive-unit/pom.xml                        |     8 +-
 itests/qtest-accumulo/pom.xml                   |    13 +
 itests/qtest-spark/pom.xml                      |    13 +
 itests/qtest/pom.xml                            |    13 +
 .../test/resources/testconfiguration.properties |     7 +-
 itests/util/pom.xml                             |     6 +
 llap-server/pom.xml                             |     7 +
 metastore/pom.xml                               |     5 +
 packaging/src/main/assembly/bin.xml             |     2 +-
 packaging/src/main/assembly/src.xml             |     1 +
 ql/pom.xml                                      |     8 +
 .../hadoop/hive/ql/plan/api/OperatorType.java   |     5 +-
 ...eColumnArithmeticIntervalYearMonthColumn.txt |     3 +-
 ...YearMonthColumnArithmeticTimestampColumn.txt |     4 +-
 .../java/org/apache/hadoop/hive/ql/Context.java |    10 +
 .../java/org/apache/hadoop/hive/ql/Driver.java  |     5 +-
 .../org/apache/hadoop/hive/ql/QueryPlan.java    |     9 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |    13 +-
 .../hadoop/hive/ql/exec/KeyWrapperFactory.java  |     2 +-
 .../hadoop/hive/ql/exec/OperatorFactory.java    |     4 +
 .../hadoop/hive/ql/exec/TableScanOperator.java  |     7 +
 .../hadoop/hive/ql/exec/TopNKeyOperator.java    |   214 +
 .../hadoop/hive/ql/exec/repl/ReplLoadTask.java  |     6 +-
 .../IncrementalLoadTasksBuilder.java            |    10 +-
 .../hive/ql/exec/vector/VectorAssignRow.java    |    20 +-
 .../ql/exec/vector/VectorTopNKeyOperator.java   |   304 +
 .../apache/hadoop/hive/ql/hooks/ATSHook.java    |     3 +-
 .../hive/ql/hooks/HiveProtoLoggingHook.java     |     3 +-
 .../metadata/HiveMaterializedViewsRegistry.java |    11 +-
 .../hive/ql/optimizer/TopNKeyProcessor.java     |   109 +
 .../ql/optimizer/calcite/RelOptHiveTable.java   |    73 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |    37 +
 .../hadoop/hive/ql/parse/CalcitePlanner.java    |    86 +-
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |     3 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |    27 +
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |    14 +-
 .../apache/hadoop/hive/ql/plan/TopNKeyDesc.java |   139 +
 .../hadoop/hive/ql/plan/VectorTopNKeyDesc.java  |    39 +
 .../hadoop/hive/ql/txn/compactor/Worker.java    |     1 +
 .../hive/ql/udf/generic/GenericUDAFCount.java   |     6 +-
 .../hadoop/hive/ql/exec/TestExplainTask.java    |     2 +-
 .../ql/exec/vector/VectorRandomRowSource.java   |    35 +-
 .../vector/aggregation/AggregationBase.java     |    22 +-
 .../aggregation/TestVectorAggregation.java      |   133 +-
 .../expressions/TestVectorArithmetic.java       |   143 +-
 .../expressions/TestVectorCastStatement.java    |     2 +
 .../expressions/TestVectorDateAddSub.java       |     4 +
 .../vector/expressions/TestVectorDateDiff.java  |     4 +
 .../expressions/TestVectorIfStatement.java      |     2 +
 .../vector/expressions/TestVectorNegative.java  |     2 +
 .../expressions/TestVectorStringConcat.java     |     2 +
 .../expressions/TestVectorStringUnary.java      |     2 +
 .../vector/expressions/TestVectorSubStr.java    |     2 +
 .../expressions/TestVectorTimestampExtract.java |     3 +
 .../parse/TestUpdateDeleteSemanticAnalyzer.java |     2 +-
 ql/src/test/queries/clientpositive/bucket7.q    |    12 +
 ql/src/test/queries/clientpositive/topnkey.q    |    31 +
 .../queries/clientpositive/vector_topnkey.q     |    30 +
 .../results/clientpositive/acid_nullscan.q.out  |     3 +
 .../alter_partition_coltype.q.out               |    18 +
 .../results/clientpositive/ambiguitycheck.q.out |     3 +
 .../analyze_table_null_partition.q.out          |     2 +
 .../clientpositive/autoColumnStats_1.q.out      |     2 +
 .../clientpositive/autoColumnStats_2.q.out      |     2 +
 .../auto_join_reordering_values.q.out           |    17 +
 .../test/results/clientpositive/bucket1.q.out   |     2 +
 .../test/results/clientpositive/bucket2.q.out   |     2 +
 .../test/results/clientpositive/bucket3.q.out   |     2 +
 .../test/results/clientpositive/bucket7.q.out   |    56 +
 .../clientpositive/bucket_map_join_spark1.q.out |    14 +
 .../clientpositive/bucket_map_join_spark2.q.out |    14 +
 .../clientpositive/bucket_map_join_spark3.q.out |    14 +
 .../clientpositive/bucket_map_join_spark4.q.out |    20 +
 .../test/results/clientpositive/combine2.q.out  |     3 +
 .../test/results/clientpositive/comments.q.out  |     7 +
 .../constantPropagateForSubQuery.q.out          |     6 +
 .../dynamic_partition_skip_default.q.out        |     9 +
 .../encryption_join_unencrypted_tbl.q.out       |     7 +
 ...on_join_with_different_encryption_keys.q.out |     7 +
 .../erasurecoding/erasure_explain.q.out         |     4 +
 .../extrapolate_part_stats_date.q.out           |     2 +
 .../extrapolate_part_stats_full.q.out           |     8 +
 .../extrapolate_part_stats_partial.q.out        |    12 +
 .../results/clientpositive/filter_aggr.q.out    |     3 +
 .../clientpositive/filter_join_breaktask.q.out  |    10 +
 .../results/clientpositive/filter_union.q.out   |     7 +
 .../clientpositive/groupby_sort_1_23.q.out      |    71 +
 .../results/clientpositive/groupby_sort_6.q.out |    12 +
 .../clientpositive/groupby_sort_skew_1_23.q.out |    71 +
 .../test/results/clientpositive/input23.q.out   |     8 +
 ql/src/test/results/clientpositive/input4.q.out |     2 +-
 .../test/results/clientpositive/input42.q.out   |     9 +
 .../results/clientpositive/input_part1.q.out    |     3 +
 .../results/clientpositive/input_part9.q.out    |     3 +
 ql/src/test/results/clientpositive/join17.q.out |     7 +
 ql/src/test/results/clientpositive/join26.q.out |    10 +
 ql/src/test/results/clientpositive/join32.q.out |    10 +
 ql/src/test/results/clientpositive/join33.q.out |    10 +
 ql/src/test/results/clientpositive/join34.q.out |    11 +
 ql/src/test/results/clientpositive/join35.q.out |    13 +
 ql/src/test/results/clientpositive/join9.q.out  |     7 +
 .../clientpositive/join_filters_overlap.q.out   |    50 +
 .../clientpositive/list_bucket_dml_1.q.out      |     6 +
 .../clientpositive/list_bucket_dml_11.q.out     |     5 +
 .../clientpositive/list_bucket_dml_12.q.out     |     8 +
 .../clientpositive/list_bucket_dml_13.q.out     |     5 +
 .../clientpositive/list_bucket_dml_14.q.out     |     5 +
 .../clientpositive/list_bucket_dml_2.q.out      |     6 +
 .../clientpositive/list_bucket_dml_3.q.out      |     6 +
 .../clientpositive/list_bucket_dml_4.q.out      |     9 +
 .../clientpositive/list_bucket_dml_5.q.out      |     6 +
 .../clientpositive/list_bucket_dml_6.q.out      |     9 +
 .../clientpositive/list_bucket_dml_7.q.out      |     9 +
 .../clientpositive/list_bucket_dml_8.q.out      |     6 +
 .../clientpositive/list_bucket_dml_9.q.out      |     9 +
 .../list_bucket_query_multiskew_1.q.out         |    12 +
 .../list_bucket_query_multiskew_2.q.out         |     9 +
 .../list_bucket_query_multiskew_3.q.out         |    12 +
 .../list_bucket_query_oneskew_1.q.out           |     9 +
 .../list_bucket_query_oneskew_2.q.out           |    14 +
 .../list_bucket_query_oneskew_3.q.out           |     3 +
 .../llap/acid_bucket_pruning.q.out              |     3 +
 .../clientpositive/llap/autoColumnStats_1.q.out |     2 +
 .../clientpositive/llap/autoColumnStats_2.q.out |     2 +
 .../llap/auto_sortmerge_join_1.q.out            |    21 +
 .../llap/auto_sortmerge_join_11.q.out           |    14 +
 .../llap/auto_sortmerge_join_12.q.out           |    12 +
 .../llap/auto_sortmerge_join_2.q.out            |    14 +
 .../llap/auto_sortmerge_join_3.q.out            |    21 +
 .../llap/auto_sortmerge_join_4.q.out            |    21 +
 .../llap/auto_sortmerge_join_5.q.out            |    21 +
 .../llap/auto_sortmerge_join_7.q.out            |    21 +
 .../llap/auto_sortmerge_join_8.q.out            |    21 +
 .../results/clientpositive/llap/bucket2.q.out   |     2 +
 .../results/clientpositive/llap/bucket3.q.out   |     2 +
 .../results/clientpositive/llap/bucket4.q.out   |     2 +
 .../clientpositive/llap/bucket_groupby.q.out    |   274 +-
 .../clientpositive/llap/bucket_many.q.out       |     2 +
 .../llap/bucket_num_reducers.q.out              |     2 +
 .../llap/bucket_num_reducers2.q.out             |     2 +
 .../clientpositive/llap/bucketmapjoin1.q.out    |    28 +
 .../clientpositive/llap/bucketmapjoin2.q.out    |    21 +
 .../clientpositive/llap/bucketmapjoin3.q.out    |    14 +
 .../clientpositive/llap/bucketmapjoin4.q.out    |    14 +
 .../clientpositive/llap/bucketpruning1.q.out    |    54 +
 .../clientpositive/llap/check_constraint.q.out  |    27 +-
 .../llap/current_date_timestamp.q.out           |     2 +
 .../llap/disable_merge_for_bucketing.q.out      |     2 +
 .../llap/dynamic_semijoin_reduction.q.out       |     7 +
 .../llap/dynamic_semijoin_user_level.q.out      |     7 +
 .../clientpositive/llap/explainuser_1.q.out     |    28 +-
 .../clientpositive/llap/explainuser_2.q.out     |   406 +-
 .../extrapolate_part_stats_partial_ndv.q.out    |     6 +
 .../llap/filter_join_breaktask.q.out            |    10 +
 .../clientpositive/llap/filter_union.q.out      |     7 +
 .../clientpositive/llap/join32_lessSize.q.out   |    44 +
 .../clientpositive/llap/limit_pushdown.q.out    |   135 +-
 .../clientpositive/llap/limit_pushdown3.q.out   |    89 +-
 .../llap/list_bucket_dml_10.q.out               |     2 +
 .../llap/llap_decimal64_reader.q.out            |    46 +-
 .../clientpositive/llap/mapjoin_mapjoin.q.out   |    10 +
 .../clientpositive/llap/metadataonly1.q.out     |    27 +
 .../clientpositive/llap/offset_limit.q.out      |    27 +-
 .../llap/offset_limit_ppd_optimizer.q.out       |    85 +-
 .../llap/orc_struct_type_vectorization.q.out    |    53 +-
 .../parquet_complex_types_vectorization.q.out   |   159 +-
 .../llap/parquet_map_type_vectorization.q.out   |    53 +-
 .../parquet_struct_type_vectorization.q.out     |    53 +-
 .../clientpositive/llap/partition_pruning.q.out |     9 +
 .../clientpositive/llap/ppd_union_view.q.out    |    24 +
 .../clientpositive/llap/smb_mapjoin_15.q.out    |    36 +
 .../results/clientpositive/llap/stats11.q.out   |    14 +
 .../llap/tez_fixed_bucket_pruning.q.out         |    32 +
 .../llap/tez_join_result_complex.q.out          |    14 +
 .../results/clientpositive/llap/topnkey.q.out   |   318 +
 .../clientpositive/llap/unionDistinct_1.q.out   |    74 +
 .../clientpositive/llap/union_stats.q.out       |    93 +-
 .../llap/vector_cast_constant.q.out             |    55 +-
 .../clientpositive/llap/vector_char_2.q.out     |   110 +-
 .../vector_groupby_grouping_sets_limit.q.out    |   346 +-
 .../llap/vector_groupby_reduce.q.out            |    49 +-
 .../llap/vector_mr_diff_schema_alias.q.out      |    25 +-
 .../llap/vector_reduce_groupby_decimal.q.out    |    53 +-
 .../llap/vector_string_concat.q.out             |    47 +-
 .../clientpositive/llap/vector_topnkey.q.out    |   592 +
 .../clientpositive/llap/vectorization_0.q.out   |     9 +
 .../llap/vectorization_limit.q.out              |    63 +-
 .../clientpositive/louter_join_ppr.q.out        |    28 +
 ql/src/test/results/clientpositive/macro.q.out  |     9 +
 .../clientpositive/mapjoin_mapjoin.q.out        |    10 +
 ql/src/test/results/clientpositive/merge3.q.out |     5 +
 .../offset_limit_global_optimizer.q.out         |    20 +
 .../results/clientpositive/outer_join_ppr.q.out |    14 +
 .../parquet_vectorization_0.q.out               |     9 +
 ql/src/test/results/clientpositive/pcr.q.out    |    82 +
 .../clientpositive/perf/tez/query10.q.out       |   346 +-
 .../clientpositive/perf/tez/query14.q.out       |  2198 +--
 .../clientpositive/perf/tez/query15.q.out       |   138 +-
 .../clientpositive/perf/tez/query17.q.out       |   372 +-
 .../clientpositive/perf/tez/query25.q.out       |   366 +-
 .../clientpositive/perf/tez/query26.q.out       |   226 +-
 .../clientpositive/perf/tez/query27.q.out       |   230 +-
 .../clientpositive/perf/tez/query29.q.out       |   374 +-
 .../clientpositive/perf/tez/query35.q.out       |   346 +-
 .../clientpositive/perf/tez/query37.q.out       |   142 +-
 .../clientpositive/perf/tez/query40.q.out       |   206 +-
 .../clientpositive/perf/tez/query43.q.out       |   128 +-
 .../clientpositive/perf/tez/query45.q.out       |   272 +-
 .../clientpositive/perf/tez/query49.q.out       |   478 +-
 .../clientpositive/perf/tez/query5.q.out        |   542 +-
 .../clientpositive/perf/tez/query50.q.out       |   250 +-
 .../clientpositive/perf/tez/query60.q.out       |   546 +-
 .../clientpositive/perf/tez/query66.q.out       |   452 +-
 .../clientpositive/perf/tez/query69.q.out       |   364 +-
 .../clientpositive/perf/tez/query7.q.out        |   226 +-
 .../clientpositive/perf/tez/query76.q.out       |   356 +-
 .../clientpositive/perf/tez/query77.q.out       |   562 +-
 .../clientpositive/perf/tez/query8.q.out        |   276 +-
 .../clientpositive/perf/tez/query80.q.out       |   756 +-
 .../clientpositive/perf/tez/query82.q.out       |   142 +-
 .../clientpositive/perf/tez/query99.q.out       |   230 +-
 .../test/results/clientpositive/plan_json.q.out |     2 +-
 .../results/clientpositive/pointlookup2.q.out   |    56 +
 .../results/clientpositive/pointlookup3.q.out   |    53 +
 .../results/clientpositive/pointlookup4.q.out   |     4 +
 .../clientpositive/ppd_join_filter.q.out        |    36 +
 ql/src/test/results/clientpositive/ppd_vc.q.out |    11 +
 .../clientpositive/ppr_allchildsarenull.q.out   |     6 +
 .../test/results/clientpositive/push_or.q.out   |     4 +
 .../clientpositive/rand_partitionpruner1.q.out  |     3 +
 .../clientpositive/rand_partitionpruner2.q.out  |     3 +
 .../clientpositive/rand_partitionpruner3.q.out  |     6 +
 .../clientpositive/router_join_ppr.q.out        |    28 +
 .../clientpositive/serde_user_properties.q.out  |     8 +
 .../spark/auto_join_reordering_values.q.out     |    17 +
 .../spark/auto_sortmerge_join_1.q.out           |    21 +
 .../spark/auto_sortmerge_join_12.q.out          |    12 +
 .../spark/auto_sortmerge_join_2.q.out           |    14 +
 .../spark/auto_sortmerge_join_3.q.out           |    21 +
 .../spark/auto_sortmerge_join_4.q.out           |    21 +
 .../spark/auto_sortmerge_join_5.q.out           |    21 +
 .../spark/auto_sortmerge_join_7.q.out           |    21 +
 .../spark/auto_sortmerge_join_8.q.out           |    21 +
 .../results/clientpositive/spark/bucket2.q.out  |     2 +
 .../results/clientpositive/spark/bucket3.q.out  |     2 +
 .../results/clientpositive/spark/bucket4.q.out  |     2 +
 .../clientpositive/spark/bucket4.q.out_spark    |     2 +
 .../results/clientpositive/spark/bucket7.q.out  |    56 +
 .../spark/bucket_map_join_spark1.q.out          |    14 +
 .../spark/bucket_map_join_spark2.q.out          |    14 +
 .../spark/bucket_map_join_spark3.q.out          |    14 +
 .../spark/bucket_map_join_spark4.q.out          |    20 +
 .../clientpositive/spark/bucketmapjoin1.q.out   |    28 +
 .../clientpositive/spark/bucketmapjoin2.q.out   |    21 +
 .../clientpositive/spark/bucketmapjoin3.q.out   |    14 +
 .../clientpositive/spark/bucketmapjoin4.q.out   |    14 +
 .../spark/disable_merge_for_bucketing.q.out     |     2 +
 .../disable_merge_for_bucketing.q.out_spark     |     2 +
 .../spark/filter_join_breaktask.q.out           |    10 +
 .../spark/groupby_sort_1_23.q.out               |    71 +
 .../spark/groupby_sort_skew_1_23.q.out          |    71 +
 .../results/clientpositive/spark/join17.q.out   |     7 +
 .../results/clientpositive/spark/join26.q.out   |    10 +
 .../results/clientpositive/spark/join32.q.out   |    10 +
 .../clientpositive/spark/join32_lessSize.q.out  |    44 +
 .../results/clientpositive/spark/join33.q.out   |    10 +
 .../results/clientpositive/spark/join34.q.out   |    11 +
 .../results/clientpositive/spark/join35.q.out   |    13 +
 .../results/clientpositive/spark/join9.q.out    |     7 +
 .../spark/join_filters_overlap.q.out            |    50 +
 .../spark/list_bucket_dml_10.q.out              |     2 +
 .../spark/list_bucket_dml_2.q.out               |     6 +
 .../clientpositive/spark/louter_join_ppr.q.out  |    28 +
 .../clientpositive/spark/mapjoin_mapjoin.q.out  |    10 +
 .../clientpositive/spark/outer_join_ppr.q.out   |    14 +
 .../spark/parquet_vectorization_0.q.out         |     9 +
 .../test/results/clientpositive/spark/pcr.q.out |    82 +
 .../clientpositive/spark/ppd_join_filter.q.out  |    36 +
 .../clientpositive/spark/router_join_ppr.q.out  |    28 +
 .../clientpositive/spark/smb_mapjoin_15.q.out   |    36 +
 .../spark/spark_union_merge.q.out               |    14 +
 .../results/clientpositive/spark/stats0.q.out   |     4 +
 .../results/clientpositive/spark/union22.q.out  |    11 +
 .../results/clientpositive/spark/union24.q.out  |    47 +
 .../clientpositive/spark/vectorization_0.q.out  |     9 +
 ql/src/test/results/clientpositive/stats0.q.out |     4 +
 .../results/clientpositive/tez/topnkey.q.out    |   162 +
 .../clientpositive/tez/vector_topnkey.q.out     |   162 +
 .../test/results/clientpositive/topnkey.q.out   |   301 +
 .../truncate_column_list_bucket.q.out           |     6 +
 .../results/clientpositive/udf_reflect2.q.out   |     3 +
 .../test/results/clientpositive/union22.q.out   |    11 +
 .../test/results/clientpositive/union24.q.out   |    47 +
 .../clientpositive/vector_outer_join3.q.out     |     6 +-
 .../clientpositive/vector_outer_join4.q.out     |     6 +-
 .../clientpositive/vector_outer_join6.q.out     |     4 +-
 .../results/clientpositive/vector_topnkey.q.out |   480 +
 .../objectinspector/ObjectInspectorUtils.java   |    19 +
 service/pom.xml                                 |     7 +
 standalone-metastore/metastore-common/pom.xml   |   128 -
 .../metastore-common/src/assembly/bin.xml       |    28 -
 .../hadoop/hive/common/StatsSetupConst.java     |   336 -
 .../common/ndv/NumDistinctValueEstimator.java   |    51 -
 .../ndv/NumDistinctValueEstimatorFactory.java   |    75 -
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |   359 -
 .../hive/common/ndv/fm/FMSketchUtils.java       |   132 -
 .../hive/common/ndv/hll/HLLConstants.java       |   933 --
 .../hive/common/ndv/hll/HLLDenseRegister.java   |   202 -
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |    50 -
 .../hive/common/ndv/hll/HLLSparseRegister.java  |   261 -
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |   664 -
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |   409 -
 .../hive/metastore/AcidEventListener.java       |   146 -
 .../hive/metastore/AggregateStatsCache.java     |   571 -
 .../hadoop/hive/metastore/AlterHandler.java     |   204 -
 .../apache/hadoop/hive/metastore/Batchable.java |    86 -
 .../hadoop/hive/metastore/ColumnType.java       |   301 -
 .../hadoop/hive/metastore/DatabaseProduct.java  |    75 -
 .../apache/hadoop/hive/metastore/Deadline.java  |   172 -
 .../hive/metastore/DeadlineException.java       |    29 -
 .../DefaultPartitionExpressionProxy.java        |    57 -
 .../metastore/DefaultStorageSchemaReader.java   |    38 -
 .../hive/metastore/FileMetadataHandler.java     |   109 -
 .../hive/metastore/FileMetadataManager.java     |   119 -
 .../hive/metastore/HMSMetricsListener.java      |    90 -
 .../hadoop/hive/metastore/HiveAlterHandler.java |   961 --
 .../hadoop/hive/metastore/HiveMetaStore.java    |  9602 -------------
 .../hive/metastore/HiveMetaStoreClient.java     |  3597 -----
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    55 -
 .../hive/metastore/IExtrapolatePartStatus.java  |    85 -
 .../hadoop/hive/metastore/IHMSHandler.java      |   109 -
 .../hadoop/hive/metastore/IMetaStoreClient.java |  3757 -----
 .../hive/metastore/IMetaStoreSchemaInfo.java    |   115 -
 .../metastore/LinearExtrapolatePartStatus.java  |   106 -
 .../hive/metastore/LockComponentBuilder.java    |   121 -
 .../hive/metastore/LockRequestBuilder.java      |   168 -
 .../MaterializationsRebuildLockCleanerTask.java |    81 -
 .../MaterializationsRebuildLockHandler.java     |   216 -
 .../hive/metastore/MetaStoreDirectSql.java      |  2837 ----
 .../metastore/MetaStoreEndFunctionContext.java  |    59 -
 .../metastore/MetaStoreEndFunctionListener.java |    58 -
 .../hive/metastore/MetaStoreEventListener.java  |   306 -
 .../MetaStoreEventListenerConstants.java        |    41 -
 .../hadoop/hive/metastore/MetaStoreFS.java      |    43 -
 .../hadoop/hive/metastore/MetaStoreInit.java    |   109 -
 .../hive/metastore/MetaStoreInitContext.java    |    27 -
 .../hive/metastore/MetaStoreInitListener.java   |    49 -
 .../metastore/MetaStoreListenerNotifier.java    |   375 -
 .../metastore/MetaStorePreEventListener.java    |    57 -
 .../hive/metastore/MetaStoreSchemaInfo.java     |   246 -
 .../metastore/MetaStoreSchemaInfoFactory.java   |    64 -
 .../hadoop/hive/metastore/MetaStoreThread.java  |    58 -
 .../hadoop/hive/metastore/MetadataStore.java    |    52 -
 .../hive/metastore/MetastoreTaskThread.java     |    38 -
 .../hadoop/hive/metastore/ObjectStore.java      | 12509 -----------------
 .../hive/metastore/PartFilterExprUtil.java      |   165 -
 .../metastore/PartitionExpressionProxy.java     |    73 -
 .../apache/hadoop/hive/metastore/RawStore.java  |  1719 ---
 .../hadoop/hive/metastore/RawStoreProxy.java    |   114 -
 .../hive/metastore/ReplChangeManager.java       |   501 -
 .../hive/metastore/RetryingHMSHandler.java      |   232 -
 .../hive/metastore/RetryingMetaStoreClient.java |   341 -
 .../hive/metastore/RuntimeStatsCleanerTask.java |    66 -
 .../metastore/SessionPropertiesListener.java    |    46 -
 .../hive/metastore/StatObjectConverter.java     |   892 --
 .../hive/metastore/TServerSocketKeepAlive.java  |    47 -
 .../hive/metastore/TSetIpAddressProcessor.java  |    62 -
 .../hive/metastore/TUGIBasedProcessor.java      |   183 -
 .../hadoop/hive/metastore/TableIterable.java    |   115 -
 .../hadoop/hive/metastore/ThreadPool.java       |    63 -
 .../TransactionalMetaStoreEventListener.java    |    39 -
 .../TransactionalValidationListener.java        |   487 -
 .../apache/hadoop/hive/metastore/Warehouse.java |   756 -
 .../hive/metastore/api/utils/DecimalUtils.java  |    49 -
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |   136 -
 .../hive/metastore/cache/CachedStore.java       |  2532 ----
 .../hive/metastore/cache/SharedCache.java       |  1650 ---
 .../client/builder/CatalogBuilder.java          |    62 -
 .../client/builder/ConstraintBuilder.java       |   115 -
 .../client/builder/DatabaseBuilder.java         |   122 -
 .../client/builder/FunctionBuilder.java         |   143 -
 .../GrantRevokePrivilegeRequestBuilder.java     |    63 -
 .../builder/HiveObjectPrivilegeBuilder.java     |    69 -
 .../client/builder/HiveObjectRefBuilder.java    |    69 -
 .../client/builder/ISchemaBuilder.java          |   102 -
 .../client/builder/PartitionBuilder.java        |   119 -
 .../builder/PrivilegeGrantInfoBuilder.java      |    84 -
 .../metastore/client/builder/RoleBuilder.java   |    55 -
 .../builder/SQLCheckConstraintBuilder.java      |    51 -
 .../builder/SQLDefaultConstraintBuilder.java    |    51 -
 .../client/builder/SQLForeignKeyBuilder.java    |   103 -
 .../builder/SQLNotNullConstraintBuilder.java    |    52 -
 .../client/builder/SQLPrimaryKeyBuilder.java    |    52 -
 .../builder/SQLUniqueConstraintBuilder.java     |    46 -
 .../client/builder/SchemaVersionBuilder.java    |   114 -
 .../client/builder/SerdeAndColsBuilder.java     |   124 -
 .../builder/StorageDescriptorBuilder.java       |   163 -
 .../metastore/client/builder/TableBuilder.java  |   224 -
 .../aggr/BinaryColumnStatsAggregator.java       |    61 -
 .../aggr/BooleanColumnStatsAggregator.java      |    62 -
 .../columnstats/aggr/ColumnStatsAggregator.java |    35 -
 .../aggr/ColumnStatsAggregatorFactory.java      |   113 -
 .../aggr/DateColumnStatsAggregator.java         |   360 -
 .../aggr/DecimalColumnStatsAggregator.java      |   375 -
 .../aggr/DoubleColumnStatsAggregator.java       |   348 -
 .../aggr/IExtrapolatePartStatus.java            |    47 -
 .../aggr/LongColumnStatsAggregator.java         |   348 -
 .../aggr/StringColumnStatsAggregator.java       |   304 -
 .../cache/DateColumnStatsDataInspector.java     |   124 -
 .../cache/DecimalColumnStatsDataInspector.java  |   124 -
 .../cache/DoubleColumnStatsDataInspector.java   |   124 -
 .../cache/LongColumnStatsDataInspector.java     |   124 -
 .../cache/StringColumnStatsDataInspector.java   |   125 -
 .../merge/BinaryColumnStatsMerger.java          |    35 -
 .../merge/BooleanColumnStatsMerger.java         |    35 -
 .../columnstats/merge/ColumnStatsMerger.java    |    31 -
 .../merge/ColumnStatsMergerFactory.java         |   120 -
 .../merge/DateColumnStatsMerger.java            |    59 -
 .../merge/DecimalColumnStatsMerger.java         |    85 -
 .../merge/DoubleColumnStatsMerger.java          |    54 -
 .../merge/LongColumnStatsMerger.java            |    54 -
 .../merge/StringColumnStatsMerger.java          |    54 -
 .../metastore/conf/ConfTemplatePrinter.java     |   150 -
 .../hive/metastore/conf/MetastoreConf.java      |  1688 ---
 .../hive/metastore/conf/TimeValidator.java      |    67 -
 .../datasource/BoneCPDataSourceProvider.java    |    87 -
 .../datasource/DataSourceProvider.java          |    79 -
 .../datasource/DataSourceProviderFactory.java   |    66 -
 .../datasource/DbCPDataSourceProvider.java      |   117 -
 .../datasource/HikariCPDataSourceProvider.java  |    89 -
 .../hive/metastore/datasource/package-info.java |    23 -
 .../hive/metastore/events/AbortTxnEvent.java    |    51 -
 .../hive/metastore/events/AcidWriteEvent.java   |    91 -
 .../metastore/events/AddForeignKeyEvent.java    |    41 -
 .../events/AddNotNullConstraintEvent.java       |    42 -
 .../metastore/events/AddPartitionEvent.java     |    84 -
 .../metastore/events/AddPrimaryKeyEvent.java    |    42 -
 .../metastore/events/AddSchemaVersionEvent.java |    40 -
 .../events/AddUniqueConstraintEvent.java        |    42 -
 .../metastore/events/AllocWriteIdEvent.java     |    57 -
 .../metastore/events/AlterCatalogEvent.java     |    44 -
 .../metastore/events/AlterDatabaseEvent.java    |    56 -
 .../metastore/events/AlterISchemaEvent.java     |    45 -
 .../metastore/events/AlterPartitionEvent.java   |    75 -
 .../events/AlterSchemaVersionEvent.java         |    46 -
 .../hive/metastore/events/AlterTableEvent.java  |    63 -
 .../hive/metastore/events/CommitTxnEvent.java   |    51 -
 .../metastore/events/ConfigChangeEvent.java     |    52 -
 .../metastore/events/CreateCatalogEvent.java    |    39 -
 .../metastore/events/CreateDatabaseEvent.java   |    43 -
 .../metastore/events/CreateFunctionEvent.java   |    43 -
 .../metastore/events/CreateISchemaEvent.java    |    39 -
 .../hive/metastore/events/CreateTableEvent.java |    43 -
 .../hive/metastore/events/DropCatalogEvent.java |    39 -
 .../metastore/events/DropConstraintEvent.java   |    57 -
 .../metastore/events/DropDatabaseEvent.java     |    43 -
 .../metastore/events/DropFunctionEvent.java     |    43 -
 .../hive/metastore/events/DropISchemaEvent.java |    39 -
 .../metastore/events/DropPartitionEvent.java    |    70 -
 .../events/DropSchemaVersionEvent.java          |    40 -
 .../hive/metastore/events/DropTableEvent.java   |    54 -
 .../hive/metastore/events/EventCleanerTask.java |    66 -
 .../hive/metastore/events/InsertEvent.java      |   132 -
 .../hive/metastore/events/ListenerEvent.java    |   187 -
 .../events/LoadPartitionDoneEvent.java          |    57 -
 .../hive/metastore/events/OpenTxnEvent.java     |    51 -
 .../metastore/events/PreAddPartitionEvent.java  |    79 -
 .../events/PreAddSchemaVersionEvent.java        |    39 -
 .../metastore/events/PreAlterCatalogEvent.java  |    40 -
 .../metastore/events/PreAlterDatabaseEvent.java |    47 -
 .../metastore/events/PreAlterISchemaEvent.java  |    44 -
 .../events/PreAlterPartitionEvent.java          |    65 -
 .../events/PreAlterSchemaVersionEvent.java      |    45 -
 .../metastore/events/PreAlterTableEvent.java    |    53 -
 .../events/PreAuthorizationCallEvent.java       |    33 -
 .../metastore/events/PreCreateCatalogEvent.java |    39 -
 .../events/PreCreateDatabaseEvent.java          |    43 -
 .../metastore/events/PreCreateISchemaEvent.java |    39 -
 .../metastore/events/PreCreateTableEvent.java   |    43 -
 .../metastore/events/PreDropCatalogEvent.java   |    39 -
 .../metastore/events/PreDropDatabaseEvent.java  |    43 -
 .../metastore/events/PreDropISchemaEvent.java   |    39 -
 .../metastore/events/PreDropPartitionEvent.java |    67 -
 .../events/PreDropSchemaVersionEvent.java       |    39 -
 .../metastore/events/PreDropTableEvent.java     |    55 -
 .../hive/metastore/events/PreEventContext.java  |    82 -
 .../events/PreLoadPartitionDoneEvent.java       |    64 -
 .../metastore/events/PreReadCatalogEvent.java   |    39 -
 .../metastore/events/PreReadDatabaseEvent.java  |    46 -
 .../metastore/events/PreReadISchemaEvent.java   |    39 -
 .../metastore/events/PreReadTableEvent.java     |    47 -
 .../events/PreReadhSchemaVersionEvent.java      |    36 -
 .../metastore/hooks/JDOConnectionURLHook.java   |    52 -
 .../metastore/messaging/AbortTxnMessage.java    |    36 -
 .../metastore/messaging/AcidWriteMessage.java   |    50 -
 .../messaging/AddForeignKeyMessage.java         |    36 -
 .../messaging/AddNotNullConstraintMessage.java  |    36 -
 .../messaging/AddPartitionMessage.java          |    68 -
 .../messaging/AddPrimaryKeyMessage.java         |    35 -
 .../messaging/AddUniqueConstraintMessage.java   |    36 -
 .../messaging/AllocWriteIdMessage.java          |    36 -
 .../messaging/AlterCatalogMessage.java          |    29 -
 .../messaging/AlterDatabaseMessage.java         |    36 -
 .../messaging/AlterPartitionMessage.java        |    69 -
 .../metastore/messaging/AlterTableMessage.java  |    58 -
 .../metastore/messaging/CommitTxnMessage.java   |    59 -
 .../messaging/CreateCatalogMessage.java         |    25 -
 .../messaging/CreateDatabaseMessage.java        |    31 -
 .../messaging/CreateFunctionMessage.java        |    46 -
 .../metastore/messaging/CreateTableMessage.java |    53 -
 .../metastore/messaging/DropCatalogMessage.java |    25 -
 .../messaging/DropConstraintMessage.java        |    29 -
 .../messaging/DropDatabaseMessage.java          |    27 -
 .../messaging/DropFunctionMessage.java          |    38 -
 .../messaging/DropPartitionMessage.java         |    49 -
 .../metastore/messaging/DropTableMessage.java   |    46 -
 .../hive/metastore/messaging/EventMessage.java  |   127 -
 .../hive/metastore/messaging/EventUtils.java    |   202 -
 .../hive/metastore/messaging/InsertMessage.java |    75 -
 .../messaging/MessageDeserializer.java          |   200 -
 .../metastore/messaging/MessageFactory.java     |   341 -
 .../metastore/messaging/OpenTxnMessage.java     |    38 -
 .../metastore/messaging/PartitionFiles.java     |    53 -
 .../messaging/event/filters/AndFilter.java      |    39 -
 .../messaging/event/filters/BasicFilter.java    |    33 -
 .../event/filters/DatabaseAndTableFilter.java   |    65 -
 .../event/filters/EventBoundaryFilter.java      |    34 -
 .../event/filters/MessageFormatFilter.java      |    36 -
 .../messaging/json/JSONAbortTxnMessage.java     |    88 -
 .../messaging/json/JSONAcidWriteMessage.java    |   150 -
 .../json/JSONAddForeignKeyMessage.java          |   102 -
 .../json/JSONAddNotNullConstraintMessage.java   |    97 -
 .../messaging/json/JSONAddPartitionMessage.java |   175 -
 .../json/JSONAddPrimaryKeyMessage.java          |   102 -
 .../json/JSONAddUniqueConstraintMessage.java    |    99 -
 .../messaging/json/JSONAllocWriteIdMessage.java |   113 -
 .../messaging/json/JSONAlterCatalogMessage.java |    90 -
 .../json/JSONAlterDatabaseMessage.java          |    97 -
 .../json/JSONAlterPartitionMessage.java         |   153 -
 .../messaging/json/JSONAlterTableMessage.java   |   128 -
 .../messaging/json/JSONCommitTxnMessage.java    |   183 -
 .../json/JSONCreateCatalogMessage.java          |    80 -
 .../json/JSONCreateDatabaseMessage.java         |    85 -
 .../json/JSONCreateFunctionMessage.java         |    87 -
 .../messaging/json/JSONCreateTableMessage.java  |   134 -
 .../messaging/json/JSONDropCatalogMessage.java  |    67 -
 .../json/JSONDropConstraintMessage.java         |    91 -
 .../messaging/json/JSONDropDatabaseMessage.java |    72 -
 .../messaging/json/JSONDropFunctionMessage.java |    79 -
 .../json/JSONDropPartitionMessage.java          |   135 -
 .../messaging/json/JSONDropTableMessage.java    |   121 -
 .../messaging/json/JSONInsertMessage.java       |   148 -
 .../messaging/json/JSONMessageDeserializer.java |   273 -
 .../messaging/json/JSONMessageFactory.java      |   402 -
 .../messaging/json/JSONOpenTxnMessage.java      |   106 -
 .../hive/metastore/metrics/JsonReporter.java    |   223 -
 .../hive/metastore/metrics/JvmPauseMonitor.java |   222 -
 .../hadoop/hive/metastore/metrics/Metrics.java  |   244 -
 .../metastore/metrics/MetricsConstants.java     |    46 -
 .../hive/metastore/metrics/PerfLogger.java      |   194 -
 .../hadoop/hive/metastore/model/MCatalog.java   |    58 -
 .../hive/metastore/model/MColumnDescriptor.java |    51 -
 .../hive/metastore/model/MConstraint.java       |   214 -
 .../hive/metastore/model/MCreationMetadata.java |    97 -
 .../hive/metastore/model/MDBPrivilege.java      |   142 -
 .../hadoop/hive/metastore/model/MDatabase.java  |   157 -
 .../hive/metastore/model/MDelegationToken.java  |    45 -
 .../hive/metastore/model/MFieldSchema.java      |    80 -
 .../hadoop/hive/metastore/model/MFunction.java  |   119 -
 .../hive/metastore/model/MGlobalPrivilege.java  |   130 -
 .../hadoop/hive/metastore/model/MISchema.java   |   107 -
 .../hadoop/hive/metastore/model/MIndex.java     |   200 -
 .../hadoop/hive/metastore/model/MMasterKey.java |    55 -
 .../metastore/model/MMetastoreDBProperties.java |    56 -
 .../hive/metastore/model/MNotificationLog.java  |   108 -
 .../metastore/model/MNotificationNextId.java    |    42 -
 .../hadoop/hive/metastore/model/MOrder.java     |    62 -
 .../hadoop/hive/metastore/model/MPartition.java |   162 -
 .../model/MPartitionColumnPrivilege.java        |   171 -
 .../model/MPartitionColumnStatistics.java       |   281 -
 .../hive/metastore/model/MPartitionEvent.java   |    97 -
 .../metastore/model/MPartitionPrivilege.java    |   149 -
 .../hive/metastore/model/MPrincipalDesc.java    |    59 -
 .../hive/metastore/model/MResourceUri.java      |    49 -
 .../hadoop/hive/metastore/model/MRole.java      |    80 -
 .../hadoop/hive/metastore/model/MRoleMap.java   |   120 -
 .../hive/metastore/model/MRuntimeStat.java      |    59 -
 .../hive/metastore/model/MSchemaVersion.java    |   127 -
 .../hadoop/hive/metastore/model/MSerDeInfo.java |   127 -
 .../metastore/model/MStorageDescriptor.java     |   277 -
 .../hive/metastore/model/MStringList.java       |    62 -
 .../hadoop/hive/metastore/model/MTable.java     |   283 -
 .../metastore/model/MTableColumnPrivilege.java  |   170 -
 .../metastore/model/MTableColumnStatistics.java |   272 -
 .../hive/metastore/model/MTablePrivilege.java   |   149 -
 .../model/MTxnWriteNotificationLog.java         |   123 -
 .../hadoop/hive/metastore/model/MType.java      |   105 -
 .../hive/metastore/model/MVersionTable.java     |    57 -
 .../hadoop/hive/metastore/model/MWMMapping.java |    83 -
 .../hadoop/hive/metastore/model/MWMPool.java    |    89 -
 .../hive/metastore/model/MWMResourcePlan.java   |   105 -
 .../hadoop/hive/metastore/model/MWMTrigger.java |    89 -
 .../hive/metastore/parser/ExpressionTree.java   |   606 -
 .../hadoop/hive/metastore/parser/Filter.g       |   486 -
 .../hive/metastore/parser/package-info.java     |    23 -
 .../spec/CompositePartitionSpecProxy.java       |   258 -
 .../spec/PartitionListComposingSpecProxy.java   |   209 -
 .../partition/spec/PartitionSpecProxy.java      |   220 -
 .../spec/PartitionSpecWithSharedSDProxy.java    |   192 -
 .../hive/metastore/security/DBTokenStore.java   |   180 -
 .../security/DelegationTokenIdentifier.java     |    52 -
 .../security/DelegationTokenSecretManager.java  |   134 -
 .../security/DelegationTokenSelector.java       |    33 -
 .../security/DelegationTokenStore.java          |   116 -
 .../metastore/security/DelegationTokenTool.java |   252 -
 .../security/HadoopThriftAuthBridge.java        |   700 -
 .../security/HadoopThriftAuthBridge23.java      |   114 -
 .../metastore/security/MemoryTokenStore.java    |   118 -
 .../MetastoreDelegationTokenManager.java        |   180 -
 .../security/TUGIContainingTransport.java       |    96 -
 .../TokenStoreDelegationTokenSecretManager.java |   334 -
 .../metastore/security/ZooKeeperTokenStore.java |   474 -
 .../hive/metastore/tools/HiveMetaTool.java      |   490 -
 .../hive/metastore/tools/HiveSchemaHelper.java  |   673 -
 .../metastore/tools/MetastoreSchemaTool.java    |   460 -
 .../hive/metastore/tools/SQLGenerator.java      |   187 -
 .../metastore/tools/SchemaToolCommandLine.java  |   308 -
 .../hive/metastore/tools/SchemaToolTask.java    |    32 -
 .../tools/SchemaToolTaskAlterCatalog.java       |    90 -
 .../tools/SchemaToolTaskCreateCatalog.java      |   132 -
 .../tools/SchemaToolTaskCreateUser.java         |   115 -
 .../metastore/tools/SchemaToolTaskInfo.java     |    43 -
 .../metastore/tools/SchemaToolTaskInit.java     |    73 -
 .../tools/SchemaToolTaskMoveDatabase.java       |    96 -
 .../tools/SchemaToolTaskMoveTable.java          |   142 -
 .../metastore/tools/SchemaToolTaskUpgrade.java  |   116 -
 .../metastore/tools/SchemaToolTaskValidate.java |   630 -
 .../hadoop/hive/metastore/tools/SmokeTest.java  |   102 -
 .../txn/AcidCompactionHistoryService.java       |    71 -
 .../metastore/txn/AcidHouseKeeperService.java   |    71 -
 .../txn/AcidOpenTxnsCounterService.java         |    72 -
 .../hive/metastore/txn/AcidWriteSetService.java |    69 -
 .../hive/metastore/txn/CompactionInfo.java      |   170 -
 .../metastore/txn/CompactionTxnHandler.java     |  1158 --
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   621 -
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  5094 -------
 .../hadoop/hive/metastore/txn/TxnStore.java     |   509 -
 .../hadoop/hive/metastore/txn/TxnUtils.java     |   481 -
 .../hive/metastore/utils/CommonCliOptions.java  |   160 -
 .../hadoop/hive/metastore/utils/FileUtils.java  |   537 -
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |   395 -
 .../metastore/utils/HiveStrictManagedUtils.java |   100 -
 .../hadoop/hive/metastore/utils/LogUtils.java   |   140 -
 .../hive/metastore/utils/MetaStoreUtils.java    |  1840 ---
 .../metastore/utils/MetastoreVersionInfo.java   |   133 -
 .../hive/metastore/utils/SecurityUtils.java     |   313 -
 .../hive/metastore/utils/StringableMap.java     |    80 -
 .../src/main/resources/package.jdo              |  1426 --
 .../metastore-common/src/main/scripts/base      |   231 -
 .../src/main/scripts/ext/metastore.sh           |    41 -
 .../src/main/scripts/ext/schemaTool.sh          |    33 -
 .../src/main/scripts/ext/smokeTest.sh           |    33 -
 .../src/main/scripts/metastore-config.sh        |    69 -
 .../src/main/scripts/schematool                 |    21 -
 .../src/main/scripts/start-metastore            |    22 -
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |   405 -
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |   692 -
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   720 -
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   720 -
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |    62 -
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |    22 -
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |    59 -
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |     5 -
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |   283 -
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |    68 -
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |     8 -
 .../src/main/sql/derby/upgrade.order.derby      |    18 -
 .../src/main/sql/mssql/create-user.mssql.sql    |     5 -
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |   947 --
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |  1246 --
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |  1284 --
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |  1284 --
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |    73 -
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |    39 -
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |    43 -
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |     7 -
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |   352 -
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |    70 -
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |    10 -
 .../src/main/sql/mssql/upgrade.order.mssql      |    12 -
 .../src/main/sql/mysql/create-user.mysql.sql    |     8 -
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |   910 --
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |  1183 --
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |  1218 --
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |  1220 --
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |    75 -
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |    42 -
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |    43 -
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |     8 -
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |   326 -
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |    71 -
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |    10 -
 .../src/main/sql/mysql/upgrade.order.mysql      |    18 -
 .../src/main/sql/oracle/create-user.oracle.sql  |     3 -
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |   856 --
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |  1140 --
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |  1175 --
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |  1177 --
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |    83 -
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |    39 -
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    58 -
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     7 -
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |   343 -
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |    70 -
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |     9 -
 .../src/main/sql/oracle/upgrade.order.oracle    |    14 -
 .../main/sql/postgres/create-user.postgres.sql  |     2 -
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |  1562 --
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |  1827 ---
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |  1866 ---
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |  1868 ---
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |    73 -
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |    40 -
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    39 -
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     8 -
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |   360 -
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |    72 -
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |    10 -
 .../main/sql/postgres/upgrade.order.postgres    |    18 -
 .../hadoop/hive/common/TestStatsSetupConst.java |   114 -
 .../ndv/fm/TestFMSketchSerialization.java       |   101 -
 .../hive/common/ndv/hll/TestHLLNoBias.java      |   117 -
 .../common/ndv/hll/TestHLLSerialization.java    |   270 -
 .../hive/common/ndv/hll/TestHyperLogLog.java    |   338 -
 .../common/ndv/hll/TestHyperLogLogDense.java    |    85 -
 .../common/ndv/hll/TestHyperLogLogMerge.java    |   147 -
 .../common/ndv/hll/TestHyperLogLogSparse.java   |    84 -
 .../common/ndv/hll/TestSparseEncodeHash.java    |    59 -
 .../metastore/AlternateFailurePreListener.java  |    62 -
 .../metastore/DummyEndFunctionListener.java     |    47 -
 .../metastore/DummyJdoConnectionUrlHook.java    |    45 -
 .../hadoop/hive/metastore/DummyListener.java    |   126 -
 .../metastore/DummyMetaStoreInitListener.java   |    39 -
 .../hadoop/hive/metastore/DummyPreListener.java |    49 -
 .../DummyRawStoreControlledCommit.java          |  1268 --
 .../DummyRawStoreForJdoConnection.java          |  1249 --
 .../apache/hadoop/hive/metastore/FakeDerby.java |   404 -
 .../HiveMetaStoreClientPreCatalog.java          |  3546 -----
 .../InjectableBehaviourObjectStore.java         |   218 -
 .../hive/metastore/IpAddressListener.java       |   102 -
 .../hive/metastore/MetaStoreTestUtils.java      |   291 -
 .../MockPartitionExpressionForMetastore.java    |    58 -
 .../hive/metastore/NonCatCallsWithCatalog.java  |  1158 --
 .../hadoop/hive/metastore/TestAdminUser.java    |    49 -
 .../hive/metastore/TestAggregateStatsCache.java |   272 -
 .../metastore/TestCatalogNonDefaultClient.java  |    74 -
 .../metastore/TestCatalogNonDefaultSvr.java     |    68 -
 .../hive/metastore/TestCatalogOldClient.java    |    44 -
 .../hadoop/hive/metastore/TestDeadline.java     |   130 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |    51 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |   254 -
 .../hive/metastore/TestHiveAlterHandler.java    |   121 -
 .../hive/metastore/TestHiveMetaStore.java       |  3102 ----
 .../metastore/TestHiveMetaStoreGetMetaConf.java |   115 -
 .../TestHiveMetaStorePartitionSpecs.java        |   383 -
 .../TestHiveMetaStoreSchemaMethods.java         |  1248 --
 .../metastore/TestHiveMetaStoreTimeout.java     |   142 -
 .../hive/metastore/TestHiveMetaStoreTxns.java   |   267 -
 ...TestHiveMetaStoreWithEnvironmentContext.java |   191 -
 .../hive/metastore/TestHiveMetastoreCli.java    |    68 -
 .../hive/metastore/TestLockRequestBuilder.java  |   587 -
 .../hive/metastore/TestMarkPartition.java       |   118 -
 .../hive/metastore/TestMarkPartitionRemote.java |    34 -
 .../TestMetaStoreConnectionUrlHook.java         |    49 -
 .../TestMetaStoreEndFunctionListener.java       |   146 -
 .../metastore/TestMetaStoreEventListener.java   |   472 -
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   121 -
 .../TestMetaStoreEventListenerWithOldConf.java  |   129 -
 .../metastore/TestMetaStoreInitListener.java    |    56 -
 .../metastore/TestMetaStoreListenersError.java  |    97 -
 .../metastore/TestMetaStoreSchemaFactory.java   |    72 -
 .../hive/metastore/TestMetaStoreSchemaInfo.java |    55 -
 .../hadoop/hive/metastore/TestObjectStore.java  |   904 --
 .../metastore/TestObjectStoreInitRetry.java     |   135 -
 .../metastore/TestObjectStoreSchemaMethods.java |   602 -
 .../hadoop/hive/metastore/TestOldSchema.java    |   233 -
 .../TestPartitionNameWhitelistValidation.java   |   125 -
 .../hive/metastore/TestRawStoreProxy.java       |    67 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |    64 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |    66 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |    31 -
 .../TestRetriesInRetryingHMSHandler.java        |   111 -
 .../hive/metastore/TestRetryingHMSHandler.java  |    82 -
 .../metastore/TestSetUGIOnBothClientServer.java |    34 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |    35 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |    35 -
 .../apache/hadoop/hive/metastore/TestStats.java |   732 -
 .../hive/metastore/TestTableIterable.java       |    76 -
 .../hive/metastore/VerifyingObjectStore.java    |   219 -
 .../annotation/MetastoreCheckinTest.java        |    25 -
 .../metastore/annotation/MetastoreTest.java     |    24 -
 .../metastore/annotation/MetastoreUnitTest.java |    25 -
 .../hive/metastore/cache/TestCachedStore.java   |  1075 --
 .../metastore/cache/TestCatalogCaching.java     |   142 -
 .../metastore/client/MetaStoreClientTest.java   |    95 -
 .../client/MetaStoreFactoryForTests.java        |   112 -
 .../metastore/client/TestAddPartitions.java     |  1736 ---
 .../client/TestAddPartitionsFromPartSpec.java   |  1267 --
 .../metastore/client/TestAlterPartitions.java   |  1154 --
 .../metastore/client/TestAppendPartitions.java  |   600 -
 .../hive/metastore/client/TestCatalogs.java     |   267 -
 .../metastore/client/TestCheckConstraint.java   |   363 -
 .../hive/metastore/client/TestDatabases.java    |   634 -
 .../metastore/client/TestDefaultConstraint.java |   363 -
 .../metastore/client/TestDropPartitions.java    |   659 -
 .../client/TestExchangePartitions.java          |  1337 --
 .../hive/metastore/client/TestForeignKey.java   |   538 -
 .../hive/metastore/client/TestFunctions.java    |   765 -
 .../metastore/client/TestGetPartitions.java     |   608 -
 .../hive/metastore/client/TestGetTableMeta.java |   330 -
 .../metastore/client/TestListPartitions.java    |  1522 --
 .../metastore/client/TestNotNullConstraint.java |   355 -
 .../hive/metastore/client/TestPrimaryKey.java   |   468 -
 .../hive/metastore/client/TestRuntimeStats.java |   154 -
 .../TestTablesCreateDropAlterTruncate.java      |  1400 --
 .../metastore/client/TestTablesGetExists.java   |   514 -
 .../hive/metastore/client/TestTablesList.java   |   320 -
 .../metastore/client/TestUniqueConstraint.java  |   356 -
 .../hive/metastore/client/package-info.java     |    22 -
 .../merge/DecimalColumnStatsMergerTest.java     |   235 -
 .../hive/metastore/conf/TestMetastoreConf.java  |   433 -
 .../TestDataSourceProviderFactory.java          |   248 -
 .../hive/metastore/dbinstall/DbInstallBase.java |   265 -
 .../hive/metastore/dbinstall/ITestMysql.java    |    82 -
 .../hive/metastore/dbinstall/ITestOracle.java   |    83 -
 .../hive/metastore/dbinstall/ITestPostgres.java |    82 -
 .../metastore/dbinstall/ITestSqlServer.java     |    84 -
 .../json/TestJSONMessageDeserializer.java       |   115 -
 .../hive/metastore/metrics/TestMetrics.java     |   164 -
 .../minihms/AbstractMetaStoreService.java       |   173 -
 .../minihms/ClusterMetaStoreForTests.java       |    32 -
 .../minihms/EmbeddedMetaStoreForTests.java      |    33 -
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |    76 -
 .../minihms/RemoteMetaStoreForTests.java        |    43 -
 .../hive/metastore/minihms/package-info.java    |    23 -
 .../tools/TestMetastoreSchemaTool.java          |    70 -
 .../tools/TestSchemaToolForMetastore.java       |   534 -
 .../metastore/txn/TestTxnHandlerNegative.java   |    58 -
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |   239 -
 .../hive/metastore/utils/TestHdfsUtils.java     |   348 -
 .../metastore/utils/TestMetaStoreUtils.java     |   291 -
 standalone-metastore/metastore-server/pom.xml   |   684 +
 .../metastore-server/src/assembly/bin.xml       |   134 +
 .../metastore-server/src/assembly/src.xml       |    53 +
 .../hadoop/hive/common/StatsSetupConst.java     |   336 +
 .../common/ndv/NumDistinctValueEstimator.java   |    51 +
 .../ndv/NumDistinctValueEstimatorFactory.java   |    75 +
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |   359 +
 .../hive/common/ndv/fm/FMSketchUtils.java       |   132 +
 .../hive/common/ndv/hll/HLLConstants.java       |   933 ++
 .../hive/common/ndv/hll/HLLDenseRegister.java   |   202 +
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |    50 +
 .../hive/common/ndv/hll/HLLSparseRegister.java  |   261 +
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |   664 +
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |   409 +
 .../hive/metastore/AcidEventListener.java       |   146 +
 .../hive/metastore/AggregateStatsCache.java     |   571 +
 .../hadoop/hive/metastore/AlterHandler.java     |   204 +
 .../apache/hadoop/hive/metastore/Batchable.java |    86 +
 .../hadoop/hive/metastore/ColumnType.java       |   301 +
 .../hadoop/hive/metastore/DatabaseProduct.java  |    75 +
 .../apache/hadoop/hive/metastore/Deadline.java  |   172 +
 .../hive/metastore/DeadlineException.java       |    29 +
 .../DefaultPartitionExpressionProxy.java        |    57 +
 .../metastore/DefaultStorageSchemaReader.java   |    38 +
 .../hive/metastore/FileMetadataHandler.java     |   109 +
 .../hive/metastore/FileMetadataManager.java     |   119 +
 .../hive/metastore/HMSMetricsListener.java      |    90 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |   961 ++
 .../hadoop/hive/metastore/HiveMetaStore.java    |  9602 +++++++++++++
 .../hive/metastore/HiveMetaStoreClient.java     |  3597 +++++
 .../hive/metastore/HiveMetaStoreFsImpl.java     |    55 +
 .../hive/metastore/IExtrapolatePartStatus.java  |    85 +
 .../hadoop/hive/metastore/IHMSHandler.java      |   109 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |  3757 +++++
 .../hive/metastore/IMetaStoreSchemaInfo.java    |   115 +
 .../metastore/LinearExtrapolatePartStatus.java  |   106 +
 .../hive/metastore/LockComponentBuilder.java    |   121 +
 .../hive/metastore/LockRequestBuilder.java      |   168 +
 .../MaterializationsRebuildLockCleanerTask.java |    81 +
 .../MaterializationsRebuildLockHandler.java     |   216 +
 .../hive/metastore/MetaStoreDirectSql.java      |  2837 ++++
 .../metastore/MetaStoreEndFunctionContext.java  |    59 +
 .../metastore/MetaStoreEndFunctionListener.java |    58 +
 .../hive/metastore/MetaStoreEventListener.java  |   306 +
 .../MetaStoreEventListenerConstants.java        |    41 +
 .../hadoop/hive/metastore/MetaStoreFS.java      |    43 +
 .../hadoop/hive/metastore/MetaStoreInit.java    |   109 +
 .../hive/metastore/MetaStoreInitContext.java    |    27 +
 .../hive/metastore/MetaStoreInitListener.java   |    49 +
 .../metastore/MetaStoreListenerNotifier.java    |   375 +
 .../metastore/MetaStorePreEventListener.java    |    57 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |   246 +
 .../metastore/MetaStoreSchemaInfoFactory.java   |    64 +
 .../hadoop/hive/metastore/MetaStoreThread.java  |    58 +
 .../hadoop/hive/metastore/MetadataStore.java    |    52 +
 .../hive/metastore/MetastoreTaskThread.java     |    38 +
 .../hadoop/hive/metastore/ObjectStore.java      | 12509 +++++++++++++++++
 .../hive/metastore/PartFilterExprUtil.java      |   165 +
 .../metastore/PartitionExpressionProxy.java     |    73 +
 .../apache/hadoop/hive/metastore/RawStore.java  |  1719 +++
 .../hadoop/hive/metastore/RawStoreProxy.java    |   114 +
 .../hive/metastore/ReplChangeManager.java       |   501 +
 .../hive/metastore/RetryingHMSHandler.java      |   232 +
 .../hive/metastore/RetryingMetaStoreClient.java |   341 +
 .../hive/metastore/RuntimeStatsCleanerTask.java |    66 +
 .../metastore/SessionPropertiesListener.java    |    46 +
 .../hive/metastore/StatObjectConverter.java     |   892 ++
 .../hive/metastore/TServerSocketKeepAlive.java  |    47 +
 .../hive/metastore/TSetIpAddressProcessor.java  |    62 +
 .../hive/metastore/TUGIBasedProcessor.java      |   183 +
 .../hadoop/hive/metastore/TableIterable.java    |   115 +
 .../hadoop/hive/metastore/ThreadPool.java       |    63 +
 .../TransactionalMetaStoreEventListener.java    |    39 +
 .../TransactionalValidationListener.java        |   487 +
 .../apache/hadoop/hive/metastore/Warehouse.java |   756 +
 .../hive/metastore/api/utils/DecimalUtils.java  |    49 +
 .../hive/metastore/cache/ByteArrayWrapper.java  |    45 +
 .../hadoop/hive/metastore/cache/CacheUtils.java |   136 +
 .../hive/metastore/cache/CachedStore.java       |  2532 ++++
 .../hive/metastore/cache/SharedCache.java       |  1650 +++
 .../client/builder/CatalogBuilder.java          |    62 +
 .../client/builder/ConstraintBuilder.java       |   115 +
 .../client/builder/DatabaseBuilder.java         |   122 +
 .../client/builder/FunctionBuilder.java         |   143 +
 .../GrantRevokePrivilegeRequestBuilder.java     |    63 +
 .../builder/HiveObjectPrivilegeBuilder.java     |    69 +
 .../client/builder/HiveObjectRefBuilder.java    |    69 +
 .../client/builder/ISchemaBuilder.java          |   102 +
 .../client/builder/PartitionBuilder.java        |   119 +
 .../builder/PrivilegeGrantInfoBuilder.java      |    84 +
 .../metastore/client/builder/RoleBuilder.java   |    55 +
 .../builder/SQLCheckConstraintBuilder.java      |    51 +
 .../builder/SQLDefaultConstraintBuilder.java    |    51 +
 .../client/builder/SQLForeignKeyBuilder.java    |   103 +
 .../builder/SQLNotNullConstraintBuilder.java    |    52 +
 .../client/builder/SQLPrimaryKeyBuilder.java    |    52 +
 .../builder/SQLUniqueConstraintBuilder.java     |    46 +
 .../client/builder/SchemaVersionBuilder.java    |   114 +
 .../client/builder/SerdeAndColsBuilder.java     |   124 +
 .../builder/StorageDescriptorBuilder.java       |   163 +
 .../metastore/client/builder/TableBuilder.java  |   224 +
 .../aggr/BinaryColumnStatsAggregator.java       |    61 +
 .../aggr/BooleanColumnStatsAggregator.java      |    62 +
 .../columnstats/aggr/ColumnStatsAggregator.java |    35 +
 .../aggr/ColumnStatsAggregatorFactory.java      |   113 +
 .../aggr/DateColumnStatsAggregator.java         |   360 +
 .../aggr/DecimalColumnStatsAggregator.java      |   375 +
 .../aggr/DoubleColumnStatsAggregator.java       |   348 +
 .../aggr/IExtrapolatePartStatus.java            |    47 +
 .../aggr/LongColumnStatsAggregator.java         |   348 +
 .../aggr/StringColumnStatsAggregator.java       |   304 +
 .../cache/DateColumnStatsDataInspector.java     |   124 +
 .../cache/DecimalColumnStatsDataInspector.java  |   124 +
 .../cache/DoubleColumnStatsDataInspector.java   |   124 +
 .../cache/LongColumnStatsDataInspector.java     |   124 +
 .../cache/StringColumnStatsDataInspector.java   |   125 +
 .../merge/BinaryColumnStatsMerger.java          |    35 +
 .../merge/BooleanColumnStatsMerger.java         |    35 +
 .../columnstats/merge/ColumnStatsMerger.java    |    31 +
 .../merge/ColumnStatsMergerFactory.java         |   120 +
 .../merge/DateColumnStatsMerger.java            |    59 +
 .../merge/DecimalColumnStatsMerger.java         |    85 +
 .../merge/DoubleColumnStatsMerger.java          |    54 +
 .../merge/LongColumnStatsMerger.java            |    54 +
 .../merge/StringColumnStatsMerger.java          |    54 +
 .../metastore/conf/ConfTemplatePrinter.java     |   150 +
 .../hive/metastore/conf/MetastoreConf.java      |  1688 +++
 .../hive/metastore/conf/TimeValidator.java      |    67 +
 .../datasource/BoneCPDataSourceProvider.java    |    87 +
 .../datasource/DataSourceProvider.java          |    79 +
 .../datasource/DataSourceProviderFactory.java   |    66 +
 .../datasource/DbCPDataSourceProvider.java      |   117 +
 .../datasource/HikariCPDataSourceProvider.java  |    89 +
 .../hive/metastore/datasource/package-info.java |    23 +
 .../hive/metastore/events/AbortTxnEvent.java    |    51 +
 .../hive/metastore/events/AcidWriteEvent.java   |    91 +
 .../metastore/events/AddForeignKeyEvent.java    |    41 +
 .../events/AddNotNullConstraintEvent.java       |    42 +
 .../metastore/events/AddPartitionEvent.java     |    84 +
 .../metastore/events/AddPrimaryKeyEvent.java    |    42 +
 .../metastore/events/AddSchemaVersionEvent.java |    40 +
 .../events/AddUniqueConstraintEvent.java        |    42 +
 .../metastore/events/AllocWriteIdEvent.java     |    57 +
 .../metastore/events/AlterCatalogEvent.java     |    44 +
 .../metastore/events/AlterDatabaseEvent.java    |    56 +
 .../metastore/events/AlterISchemaEvent.java     |    45 +
 .../metastore/events/AlterPartitionEvent.java   |    75 +
 .../events/AlterSchemaVersionEvent.java         |    46 +
 .../hive/metastore/events/AlterTableEvent.java  |    63 +
 .../hive/metastore/events/CommitTxnEvent.java   |    51 +
 .../metastore/events/ConfigChangeEvent.java     |    52 +
 .../metastore/events/CreateCatalogEvent.java    |    39 +
 .../metastore/events/CreateDatabaseEvent.java   |    43 +
 .../metastore/events/CreateFunctionEvent.java   |    43 +
 .../metastore/events/CreateISchemaEvent.java    |    39 +
 .../hive/metastore/events/CreateTableEvent.java |    43 +
 .../hive/metastore/events/DropCatalogEvent.java |    39 +
 .../metastore/events/DropConstraintEvent.java   |    57 +
 .../metastore/events/DropDatabaseEvent.java     |    43 +
 .../metastore/events/DropFunctionEvent.java     |    43 +
 .../hive/metastore/events/DropISchemaEvent.java |    39 +
 .../metastore/events/DropPartitionEvent.java    |    70 +
 .../events/DropSchemaVersionEvent.java          |    40 +
 .../hive/metastore/events/DropTableEvent.java   |    54 +
 .../hive/metastore/events/EventCleanerTask.java |    66 +
 .../hive/metastore/events/InsertEvent.java      |   132 +
 .../hive/metastore/events/ListenerEvent.java    |   187 +
 .../events/LoadPartitionDoneEvent.java          |    57 +
 .../hive/metastore/events/OpenTxnEvent.java     |    51 +
 .../metastore/events/PreAddPartitionEvent.java  |    79 +
 .../events/PreAddSchemaVersionEvent.java        |    39 +
 .../metastore/events/PreAlterCatalogEvent.java  |    40 +
 .../metastore/events/PreAlterDatabaseEvent.java |    47 +
 .../metastore/events/PreAlterISchemaEvent.java  |    44 +
 .../events/PreAlterPartitionEvent.java          |    65 +
 .../events/PreAlterSchemaVersionEvent.java      |    45 +
 .../metastore/events/PreAlterTableEvent.java    |    53 +
 .../events/PreAuthorizationCallEvent.java       |    33 +
 .../metastore/events/PreCreateCatalogEvent.java |    39 +
 .../events/PreCreateDatabaseEvent.java          |    43 +
 .../metastore/events/PreCreateISchemaEvent.java |    39 +
 .../metastore/events/PreCreateTableEvent.java   |    43 +
 .../metastore/events/PreDropCatalogEvent.java   |    39 +
 .../metastore/events/PreDropDatabaseEvent.java  |    43 +
 .../metastore/events/PreDropISchemaEvent.java   |    39 +
 .../metastore/events/PreDropPartitionEvent.java |    67 +
 .../events/PreDropSchemaVersionEvent.java       |    39 +
 .../metastore/events/PreDropTableEvent.java     |    55 +
 .../hive/metastore/events/PreEventContext.java  |    82 +
 .../events/PreLoadPartitionDoneEvent.java       |    64 +
 .../metastore/events/PreReadCatalogEvent.java   |    39 +
 .../metastore/events/PreReadDatabaseEvent.java  |    46 +
 .../metastore/events/PreReadISchemaEvent.java   |    39 +
 .../metastore/events/PreReadTableEvent.java     |    47 +
 .../events/PreReadhSchemaVersionEvent.java      |    36 +
 .../metastore/hooks/JDOConnectionURLHook.java   |    52 +
 .../metastore/messaging/AbortTxnMessage.java    |    36 +
 .../metastore/messaging/AcidWriteMessage.java   |    50 +
 .../messaging/AddForeignKeyMessage.java         |    36 +
 .../messaging/AddNotNullConstraintMessage.java  |    36 +
 .../messaging/AddPartitionMessage.java          |    68 +
 .../messaging/AddPrimaryKeyMessage.java         |    35 +
 .../messaging/AddUniqueConstraintMessage.java   |    36 +
 .../messaging/AllocWriteIdMessage.java          |    36 +
 .../messaging/AlterCatalogMessage.java          |    29 +
 .../messaging/AlterDatabaseMessage.java         |    36 +
 .../messaging/AlterPartitionMessage.java        |    69 +
 .../metastore/messaging/AlterTableMessage.java  |    58 +
 .../metastore/messaging/CommitTxnMessage.java   |    59 +
 .../messaging/CreateCatalogMessage.java         |    25 +
 .../messaging/CreateDatabaseMessage.java        |    31 +
 .../messaging/CreateFunctionMessage.java        |    46 +
 .../metastore/messaging/CreateTableMessage.java |    53 +
 .../metastore/messaging/DropCatalogMessage.java |    25 +
 .../messaging/DropConstraintMessage.java        |    29 +
 .../messaging/DropDatabaseMessage.java          |    27 +
 .../messaging/DropFunctionMessage.java          |    38 +
 .../messaging/DropPartitionMessage.java         |    49 +
 .../metastore/messaging/DropTableMessage.java   |    46 +
 .../hive/metastore/messaging/EventMessage.java  |   127 +
 .../hive/metastore/messaging/EventUtils.java    |   202 +
 .../hive/metastore/messaging/InsertMessage.java |    75 +
 .../messaging/MessageDeserializer.java          |   200 +
 .../metastore/messaging/MessageFactory.java     |   341 +
 .../metastore/messaging/OpenTxnMessage.java     |    38 +
 .../metastore/messaging/PartitionFiles.java     |    53 +
 .../messaging/event/filters/AndFilter.java      |    39 +
 .../messaging/event/filters/BasicFilter.java    |    33 +
 .../event/filters/DatabaseAndTableFilter.java   |    65 +
 .../event/filters/EventBoundaryFilter.java      |    34 +
 .../event/filters/MessageFormatFilter.java      |    36 +
 .../messaging/json/JSONAbortTxnMessage.java     |    88 +
 .../messaging/json/JSONAcidWriteMessage.java    |   150 +
 .../json/JSONAddForeignKeyMessage.java          |   102 +
 .../json/JSONAddNotNullConstraintMessage.java   |    97 +
 .../messaging/json/JSONAddPartitionMessage.java |   175 +
 .../json/JSONAddPrimaryKeyMessage.java          |   102 +
 .../json/JSONAddUniqueConstraintMessage.java    |    99 +
 .../messaging/json/JSONAllocWriteIdMessage.java |   113 +
 .../messaging/json/JSONAlterCatalogMessage.java |    90 +
 .../json/JSONAlterDatabaseMessage.java          |    97 +
 .../json/JSONAlterPartitionMessage.java         |   153 +
 .../messaging/json/JSONAlterTableMessage.java   |   128 +
 .../messaging/json/JSONCommitTxnMessage.java    |   183 +
 .../json/JSONCreateCatalogMessage.java          |    80 +
 .../json/JSONCreateDatabaseMessage.java         |    85 +
 .../json/JSONCreateFunctionMessage.java         |    87 +
 .../messaging/json/JSONCreateTableMessage.java  |   134 +
 .../messaging/json/JSONDropCatalogMessage.java  |    67 +
 .../json/JSONDropConstraintMessage.java         |    91 +
 .../messaging/json/JSONDropDatabaseMessage.java |    72 +
 .../messaging/json/JSONDropFunctionMessage.java |    79 +
 .../json/JSONDropPartitionMessage.java          |   135 +
 .../messaging/json/JSONDropTableMessage.java    |   121 +
 .../messaging/json/JSONInsertMessage.java       |   148 +
 .../messaging/json/JSONMessageDeserializer.java |   273 +
 .../messaging/json/JSONMessageFactory.java      |   402 +
 .../messaging/json/JSONOpenTxnMessage.java      |   106 +
 .../hive/metastore/metrics/JsonReporter.java    |   223 +
 .../hive/metastore/metrics/JvmPauseMonitor.java |   222 +
 .../hadoop/hive/metastore/metrics/Metrics.java  |   244 +
 .../metastore/metrics/MetricsConstants.java     |    46 +
 .../hive/metastore/metrics/PerfLogger.java      |   194 +
 .../hadoop/hive/metastore/model/MCatalog.java   |    58 +
 .../hive/metastore/model/MColumnDescriptor.java |    51 +
 .../hive/metastore/model/MConstraint.java       |   214 +
 .../hive/metastore/model/MCreationMetadata.java |    97 +
 .../hive/metastore/model/MDBPrivilege.java      |   142 +
 .../hadoop/hive/metastore/model/MDatabase.java  |   157 +
 .../hive/metastore/model/MDelegationToken.java  |    45 +
 .../hive/metastore/model/MFieldSchema.java      |    80 +
 .../hadoop/hive/metastore/model/MFunction.java  |   119 +
 .../hive/metastore/model/MGlobalPrivilege.java  |   130 +
 .../hadoop/hive/metastore/model/MISchema.java   |   107 +
 .../hadoop/hive/metastore/model/MIndex.java     |   200 +
 .../hadoop/hive/metastore/model/MMasterKey.java |    55 +
 .../metastore/model/MMetastoreDBProperties.java |    56 +
 .../hive/metastore/model/MNotificationLog.java  |   108 +
 .../metastore/model/MNotificationNextId.java    |    42 +
 .../hadoop/hive/metastore/model/MOrder.java     |    62 +
 .../hadoop/hive/metastore/model/MPartition.java |   162 +
 .../model/MPartitionColumnPrivilege.java        |   171 +
 .../model/MPartitionColumnStatistics.java       |   281 +
 .../hive/metastore/model/MPartitionEvent.java   |    97 +
 .../metastore/model/MPartitionPrivilege.java    |   149 +
 .../hive/metastore/model/MPrincipalDesc.java    |    59 +
 .../hive/metastore/model/MResourceUri.java      |    49 +
 .../hadoop/hive/metastore/model/MRole.java      |    80 +
 .../hadoop/hive/metastore/model/MRoleMap.java   |   120 +
 .../hive/metastore/model/MRuntimeStat.java      |    59 +
 .../hive/metastore/model/MSchemaVersion.java    |   127 +
 .../hadoop/hive/metastore/model/MSerDeInfo.java |   127 +
 .../metastore/model/MStorageDescriptor.java     |   277 +
 .../hive/metastore/model/MStringList.java       |    62 +
 .../hadoop/hive/metastore/model/MTable.java     |   283 +
 .../metastore/model/MTableColumnPrivilege.java  |   170 +
 .../metastore/model/MTableColumnStatistics.java |   272 +
 .../hive/metastore/model/MTablePrivilege.java   |   149 +
 .../model/MTxnWriteNotificationLog.java         |   123 +
 .../hadoop/hive/metastore/model/MType.java      |   105 +
 .../hive/metastore/model/MVersionTable.java     |    57 +
 .../hadoop/hive/metastore/model/MWMMapping.java |    83 +
 .../hadoop/hive/metastore/model/MWMPool.java    |    89 +
 .../hive/metastore/model/MWMResourcePlan.java   |   105 +
 .../hadoop/hive/metastore/model/MWMTrigger.java |    89 +
 .../hive/metastore/parser/ExpressionTree.java   |   606 +
 .../hadoop/hive/metastore/parser/Filter.g       |   486 +
 .../hive/metastore/parser/package-info.java     |    23 +
 .../spec/CompositePartitionSpecProxy.java       |   258 +
 .../spec/PartitionListComposingSpecProxy.java   |   209 +
 .../partition/spec/PartitionSpecProxy.java      |   220 +
 .../spec/PartitionSpecWithSharedSDProxy.java    |   192 +
 .../hive/metastore/security/DBTokenStore.java   |   180 +
 .../security/DelegationTokenIdentifier.java     |    52 +
 .../security/DelegationTokenSecretManager.java  |   134 +
 .../security/DelegationTokenSelector.java       |    33 +
 .../security/DelegationTokenStore.java          |   116 +
 .../metastore/security/DelegationTokenTool.java |   252 +
 .../security/HadoopThriftAuthBridge.java        |   700 +
 .../security/HadoopThriftAuthBridge23.java      |   114 +
 .../metastore/security/MemoryTokenStore.java    |   118 +
 .../MetastoreDelegationTokenManager.java        |   180 +
 .../security/TUGIContainingTransport.java       |    96 +
 .../TokenStoreDelegationTokenSecretManager.java |   334 +
 .../metastore/security/ZooKeeperTokenStore.java |   474 +
 .../hive/metastore/tools/HiveMetaTool.java      |   490 +
 .../hive/metastore/tools/HiveSchemaHelper.java  |   673 +
 .../metastore/tools/MetastoreSchemaTool.java    |   460 +
 .../hive/metastore/tools/SQLGenerator.java      |   187 +
 .../metastore/tools/SchemaToolCommandLine.java  |   308 +
 .../hive/metastore/tools/SchemaToolTask.java    |    32 +
 .../tools/SchemaToolTaskAlterCatalog.java       |    90 +
 .../tools/SchemaToolTaskCreateCatalog.java      |   132 +
 .../tools/SchemaToolTaskCreateUser.java         |   115 +
 .../metastore/tools/SchemaToolTaskInfo.java     |    43 +
 .../metastore/tools/SchemaToolTaskInit.java     |    73 +
 .../tools/SchemaToolTaskMoveDatabase.java       |    96 +
 .../tools/SchemaToolTaskMoveTable.java          |   142 +
 .../metastore/tools/SchemaToolTaskUpgrade.java  |   116 +
 .../metastore/tools/SchemaToolTaskValidate.java |   630 +
 .../hadoop/hive/metastore/tools/SmokeTest.java  |   102 +
 .../txn/AcidCompactionHistoryService.java       |    71 +
 .../metastore/txn/AcidHouseKeeperService.java   |    71 +
 .../txn/AcidOpenTxnsCounterService.java         |    72 +
 .../hive/metastore/txn/AcidWriteSetService.java |    69 +
 .../hive/metastore/txn/CompactionInfo.java      |   170 +
 .../metastore/txn/CompactionTxnHandler.java     |  1158 ++
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |   621 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |  5094 +++++++
 .../hadoop/hive/metastore/txn/TxnStore.java     |   509 +
 .../hadoop/hive/metastore/txn/TxnUtils.java     |   481 +
 .../hive/metastore/utils/CommonCliOptions.java  |   160 +
 .../hadoop/hive/metastore/utils/FileUtils.java  |   537 +
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |   395 +
 .../metastore/utils/HiveStrictManagedUtils.java |   100 +
 .../hadoop/hive/metastore/utils/LogUtils.java   |   140 +
 .../hive/metastore/utils/MetaStoreUtils.java    |  1840 +++
 .../metastore/utils/MetastoreVersionInfo.java   |   133 +
 .../hive/metastore/utils/SecurityUtils.java     |   313 +
 .../hive/metastore/utils/StringableMap.java     |    80 +
 .../main/resources/datanucleus-log4j.properties |    17 +
 .../main/resources/metastore-log4j2.properties  |    71 +
 .../src/main/resources/metastore-site.xml       |    34 +
 .../src/main/resources/package.jdo              |  1426 ++
 .../src/main/resources/saveVersion.sh           |    91 +
 .../metastore-server/src/main/scripts/base      |   237 +
 .../src/main/scripts/ext/metastore.sh           |    41 +
 .../src/main/scripts/ext/schemaTool.sh          |    33 +
 .../src/main/scripts/ext/smokeTest.sh           |    33 +
 .../src/main/scripts/metastore-config.sh        |    69 +
 .../src/main/scripts/schematool                 |    21 +
 .../src/main/scripts/start-metastore            |    22 +
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |   405 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |   692 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   720 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |   720 +
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |    62 +
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |    22 +
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |    59 +
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |     5 +
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |   283 +
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |    68 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |     8 +
 .../src/main/sql/derby/upgrade.order.derby      |    18 +
 .../src/main/sql/mssql/create-user.mssql.sql    |     5 +
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |   947 ++
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |  1246 ++
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |  1284 ++
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |  1284 ++
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |    73 +
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |    39 +
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |    43 +
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |     7 +
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |   352 +
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |    70 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |    10 +
 .../src/main/sql/mssql/upgrade.order.mssql      |    12 +
 .../src/main/sql/mysql/create-user.mysql.sql    |     8 +
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |   910 ++
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |  1183 ++
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |  1218 ++
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |  1220 ++
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |    75 +
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |    42 +
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |    43 +
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |     8 +
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |   326 +
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |    71 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |    10 +
 .../src/main/sql/mysql/upgrade.order.mysql      |    18 +
 .../src/main/sql/oracle/create-user.oracle.sql  |     3 +
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |   856 ++
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |  1140 ++
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |  1175 ++
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |  1177 ++
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |    83 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |    39 +
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |    58 +
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |     7 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |   343 +
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |    70 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |     9 +
 .../src/main/sql/oracle/upgrade.order.oracle    |    14 +
 .../main/sql/postgres/create-user.postgres.sql  |     2 +
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |  1562 ++
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |  1827 +++
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |  1866 +++
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |  1868 +++
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |    73 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |    40 +
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |    39 +
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |     8 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |   360 +
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |    72 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |    10 +
 .../main/sql/postgres/upgrade.order.postgres    |    18 +
 .../hadoop/hive/common/TestStatsSetupConst.java |   114 +
 .../ndv/fm/TestFMSketchSerialization.java       |   101 +
 .../hive/common/ndv/hll/TestHLLNoBias.java      |   117 +
 .../common/ndv/hll/TestHLLSerialization.java    |   270 +
 .../hive/common/ndv/hll/TestHyperLogLog.java    |   338 +
 .../common/ndv/hll/TestHyperLogLogDense.java    |    85 +
 .../common/ndv/hll/TestHyperLogLogMerge.java    |   147 +
 .../common/ndv/hll/TestHyperLogLogSparse.java   |    84 +
 .../common/ndv/hll/TestSparseEncodeHash.java    |    59 +
 .../metastore/AlternateFailurePreListener.java  |    62 +
 .../metastore/DummyEndFunctionListener.java     |    47 +
 .../metastore/DummyJdoConnectionUrlHook.java    |    45 +
 .../hadoop/hive/metastore/DummyListener.java    |   126 +
 .../metastore/DummyMetaStoreInitListener.java   |    39 +
 .../hadoop/hive/metastore/DummyPreListener.java |    49 +
 .../DummyRawStoreControlledCommit.java          |  1268 ++
 .../DummyRawStoreForJdoConnection.java          |  1249 ++
 .../apache/hadoop/hive/metastore/FakeDerby.java |   404 +
 .../HiveMetaStoreClientPreCatalog.java          |  3546 +++++
 .../InjectableBehaviourObjectStore.java         |   218 +
 .../hive/metastore/IpAddressListener.java       |   102 +
 .../hive/metastore/MetaStoreTestUtils.java      |   291 +
 .../MockPartitionExpressionForMetastore.java    |    58 +
 .../hive/metastore/NonCatCallsWithCatalog.java  |  1158 ++
 .../hadoop/hive/metastore/TestAdminUser.java    |    49 +
 .../hive/metastore/TestAggregateStatsCache.java |   272 +
 .../metastore/TestCatalogNonDefaultClient.java  |    74 +
 .../metastore/TestCatalogNonDefaultSvr.java     |    68 +
 .../hive/metastore/TestCatalogOldClient.java    |    44 +
 .../hadoop/hive/metastore/TestDeadline.java     |   130 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |    51 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |   254 +
 .../hive/metastore/TestHiveAlterHandler.java    |   121 +
 .../hive/metastore/TestHiveMetaStore.java       |  3102 ++++
 .../metastore/TestHiveMetaStoreGetMetaConf.java |   115 +
 .../TestHiveMetaStorePartitionSpecs.java        |   383 +
 .../TestHiveMetaStoreSchemaMethods.java         |  1248 ++
 .../metastore/TestHiveMetaStoreTimeout.java     |   142 +
 .../hive/metastore/TestHiveMetaStoreTxns.java   |   267 +
 ...TestHiveMetaStoreWithEnvironmentContext.java |   191 +
 .../hive/metastore/TestHiveMetastoreCli.java    |    68 +
 .../hive/metastore/TestLockRequestBuilder.java  |   587 +
 .../hive/metastore/TestMarkPartition.java       |   118 +
 .../hive/metastore/TestMarkPartitionRemote.java |    34 +
 .../TestMetaStoreConnectionUrlHook.java         |    49 +
 .../TestMetaStoreEndFunctionListener.java       |   146 +
 .../metastore/TestMetaStoreEventListener.java   |   472 +
 .../TestMetaStoreEventListenerOnlyOnCommit.java |   121 +
 .../TestMetaStoreEventListenerWithOldConf.java  |   129 +
 .../metastore/TestMetaStoreInitListener.java    |    56 +
 .../metastore/TestMetaStoreListenersError.java  |    97 +
 .../metastore/TestMetaStoreSchemaFactory.java   |    72 +
 .../hive/metastore/TestMetaStoreSchemaInfo.java |    55 +
 .../hadoop/hive/metastore/TestObjectStore.java  |   904 ++
 .../metastore/TestObjectStoreInitRetry.java     |   135 +
 .../metastore/TestObjectStoreSchemaMethods.java |   602 +
 .../hadoop/hive/metastore/TestOldSchema.java    |   233 +
 .../TestPartitionNameWhitelistValidation.java   |   125 +
 .../hive/metastore/TestRawStoreProxy.java       |    67 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |    64 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |    66 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |    31 +
 .../TestRetriesInRetryingHMSHandler.java        |   111 +
 .../hive/metastore/TestRetryingHMSHandler.java  |    82 +
 .../metastore/TestSetUGIOnBothClientServer.java |    34 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |    35 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |    35 +
 .../apache/hadoop/hive/metastore/TestStats.java |   732 +
 .../hive/metastore/TestTableIterable.java       |    76 +
 .../hive/metastore/VerifyingObjectStore.java    |   219 +
 .../annotation/MetastoreCheckinTest.java        |    25 +
 .../metastore/annotation/MetastoreTest.java     |    24 +
 .../metastore/annotation/MetastoreUnitTest.java |    25 +
 .../hive/metastore/cache/TestCachedStore.java   |  1075 ++
 .../metastore/cache/TestCatalogCaching.java     |   142 +
 .../metastore/client/MetaStoreClientTest.java   |    95 +
 .../client/MetaStoreFactoryForTests.java        |   112 +
 .../metastore/client/TestAddPartitions.java     |  1736 +++
 .../client/TestAddPartitionsFromPartSpec.java   |  1267 ++
 .../metastore/client/TestAlterPartitions.java   |  1154 ++
 .../metastore/client/TestAppendPartitions.java  |   600 +
 .../hive/metastore/client/TestCatalogs.java     |   267 +
 .../metastore/client/TestCheckConstraint.java   |   363 +
 .../hive/metastore/client/TestDatabases.java    |   634 +
 .../metastore/client/TestDefaultConstraint.java |   363 +
 .../metastore/client/TestDropPartitions.java    |   659 +
 .../client/TestExchangePartitions.java          |  1337 ++
 .../hive/metastore/client/TestForeignKey.java   |   538 +
 .../hive/metastore/client/TestFunctions.java    |   765 +
 .../metastore/client/TestGetPartitions.java     |   608 +
 .../hive/metastore/client/TestGetTableMeta.java |   330 +
 .../metastore/client/TestListPartitions.java    |  1522 ++
 .../metastore/client/TestNotNullConstraint.java |   355 +
 .../hive/metastore/client/TestPrimaryKey.java   |   468 +
 .../hive/metastore/client/TestRuntimeStats.java |   154 +
 .../TestTablesCreateDropAlterTruncate.java      |  1400 ++
 .../metastore/client/TestTablesGetExists.java   |   514 +
 .../hive/metastore/client/TestTablesList.java   |   320 +
 .../metastore/client/TestUniqueConstraint.java  |   356 +
 .../hive/metastore/client/package-info.java     |    22 +
 .../merge/DecimalColumnStatsMergerTest.java     |   235 +
 .../hive/metastore/conf/TestMetastoreConf.java  |   433 +
 .../TestDataSourceProviderFactory.java          |   248 +
 .../hive/metastore/dbinstall/DbInstallBase.java |   265 +
 .../hive/metastore/dbinstall/ITestMysql.java    |    82 +
 .../hive/metastore/dbinstall/ITestOracle.java   |    83 +
 .../hive/metastore/dbinstall/ITestPostgres.java |    82 +
 .../metastore/dbinstall/ITestSqlServer.java     |    84 +
 .../json/TestJSONMessageDeserializer.java       |   115 +
 .../hive/metastore/metrics/TestMetrics.java     |   164 +
 .../minihms/AbstractMetaStoreService.java       |   173 +
 .../minihms/ClusterMetaStoreForTests.java       |    32 +
 .../minihms/EmbeddedMetaStoreForTests.java      |    33 +
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |    76 +
 .../minihms/RemoteMetaStoreForTests.java        |    43 +
 .../tools/TestMetastoreSchemaTool.java          |    70 +
 .../tools/TestSchemaToolForMetastore.java       |   534 +
 .../metastore/txn/TestTxnHandlerNegative.java   |    58 +
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |   239 +
 .../hive/metastore/utils/TestHdfsUtils.java     |   348 +
 .../metastore/utils/TestMetaStoreUtils.java     |   291 +
 .../src/test/resources/log4j2.properties        |    35 +
 standalone-metastore/pom.xml                    |     1 +
 upgrade-acid/pom.xml                            |     2 +-
 1427 files changed, 180054 insertions(+), 172313 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/RelOptHiveTable.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------


[32/50] [abbrv] hive git commit: HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)

Posted by se...@apache.org.
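
For illustration only (not part of this patch): a minimal sketch of how a Python client would build one of the affected request structs against the regenerated gen-py bindings after this change, which drops the optional txnId field and renumbers writeId/validWriteIdList. It assumes the regenerated hive_metastore package from gen-py is importable; the table name and the validWriteIdList value are placeholders, not real values.

    # Sketch against the regenerated bindings; hypothetical values throughout.
    from hive_metastore.ttypes import TruncateTableRequest

    req = TruncateTableRequest(
        dbName='default',
        tableName='acid_tbl',          # hypothetical table name
        partNames=None,                # optional: omit to truncate the whole table
        writeId=42,                    # write id allocated for this operation
        validWriteIdList='<serialized ValidWriteIdList>',  # placeholder string
    )
    # The struct no longer has a txnId member, so passing txnId=... to the
    # constructor would now raise a TypeError with the regenerated code.

The same renumbering applies to the other structs touched below (SetPartitionsStatsRequest, TableStatsRequest, PartitionsStatsRequest, AddPartitionsRequest, GetTableRequest, AlterPartitionsRequest, RenamePartitionRequest, AlterTableRequest): clients keep writeId and/or validWriteIdList but stop sending a transaction id.
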
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 923fe2c..7fc1e43 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -2510,7 +2510,6 @@ class TruncateTableRequest:
    - dbName
    - tableName
    - partNames
-   - txnId
    - writeId
    - validWriteIdList
   """
@@ -2520,16 +2519,14 @@ class TruncateTableRequest:
     (1, TType.STRING, 'dbName', None, None, ), # 1
     (2, TType.STRING, 'tableName', None, None, ), # 2
     (3, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 3
-    (4, TType.I64, 'txnId', None, -1, ), # 4
-    (5, TType.I64, 'writeId', None, -1, ), # 5
-    (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
+    (4, TType.I64, 'writeId', None, -1, ), # 4
+    (5, TType.STRING, 'validWriteIdList', None, None, ), # 5
   )
 
-  def __init__(self, dbName=None, tableName=None, partNames=None, txnId=thrift_spec[4][4], writeId=thrift_spec[5][4], validWriteIdList=None,):
+  def __init__(self, dbName=None, tableName=None, partNames=None, writeId=thrift_spec[4][4], validWriteIdList=None,):
     self.dbName = dbName
     self.tableName = tableName
     self.partNames = partNames
-    self.txnId = txnId
     self.writeId = writeId
     self.validWriteIdList = validWriteIdList
 
@@ -2564,15 +2561,10 @@ class TruncateTableRequest:
           iprot.skip(ftype)
       elif fid == 4:
         if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I64:
           self.writeId = iprot.readI64()
         else:
           iprot.skip(ftype)
-      elif fid == 6:
+      elif fid == 5:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -2602,16 +2594,12 @@ class TruncateTableRequest:
         oprot.writeString(iter75)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 4)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 5)
+      oprot.writeFieldBegin('writeId', TType.I64, 4)
       oprot.writeI64(self.writeId)
       oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -2630,7 +2618,6 @@ class TruncateTableRequest:
     value = (value * 31) ^ hash(self.dbName)
     value = (value * 31) ^ hash(self.tableName)
     value = (value * 31) ^ hash(self.partNames)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
@@ -7316,7 +7303,6 @@ class SetPartitionsStatsRequest:
   Attributes:
    - colStats
    - needMerge
-   - txnId
    - writeId
    - validWriteIdList
   """
@@ -7325,15 +7311,13 @@ class SetPartitionsStatsRequest:
     None, # 0
     (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1
     (2, TType.BOOL, 'needMerge', None, None, ), # 2
-    (3, TType.I64, 'txnId', None, -1, ), # 3
-    (4, TType.I64, 'writeId', None, -1, ), # 4
-    (5, TType.STRING, 'validWriteIdList', None, None, ), # 5
+    (3, TType.I64, 'writeId', None, -1, ), # 3
+    (4, TType.STRING, 'validWriteIdList', None, None, ), # 4
   )
 
-  def __init__(self, colStats=None, needMerge=None, txnId=thrift_spec[3][4], writeId=thrift_spec[4][4], validWriteIdList=None,):
+  def __init__(self, colStats=None, needMerge=None, writeId=thrift_spec[3][4], validWriteIdList=None,):
     self.colStats = colStats
     self.needMerge = needMerge
-    self.txnId = txnId
     self.writeId = writeId
     self.validWriteIdList = validWriteIdList
 
@@ -7364,15 +7348,10 @@ class SetPartitionsStatsRequest:
           iprot.skip(ftype)
       elif fid == 3:
         if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 4:
-        if ftype == TType.I64:
           self.writeId = iprot.readI64()
         else:
           iprot.skip(ftype)
-      elif fid == 5:
+      elif fid == 4:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -7398,16 +7377,12 @@ class SetPartitionsStatsRequest:
       oprot.writeFieldBegin('needMerge', TType.BOOL, 2)
       oprot.writeBool(self.needMerge)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 3)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 4)
+      oprot.writeFieldBegin('writeId', TType.I64, 3)
       oprot.writeI64(self.writeId)
       oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 4)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -7423,7 +7398,6 @@ class SetPartitionsStatsRequest:
     value = 17
     value = (value * 31) ^ hash(self.colStats)
     value = (value * 31) ^ hash(self.needMerge)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
@@ -9722,7 +9696,6 @@ class TableStatsRequest:
    - tblName
    - colNames
    - catName
-   - txnId
    - validWriteIdList
   """
 
@@ -9732,16 +9705,14 @@ class TableStatsRequest:
     (2, TType.STRING, 'tblName', None, None, ), # 2
     (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
     (4, TType.STRING, 'catName', None, None, ), # 4
-    (5, TType.I64, 'txnId', None, -1, ), # 5
-    (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
+    (5, TType.STRING, 'validWriteIdList', None, None, ), # 5
   )
 
-  def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
+  def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, validWriteIdList=None,):
     self.dbName = dbName
     self.tblName = tblName
     self.colNames = colNames
     self.catName = catName
-    self.txnId = txnId
     self.validWriteIdList = validWriteIdList
 
   def read(self, iprot):
@@ -9779,11 +9750,6 @@ class TableStatsRequest:
         else:
           iprot.skip(ftype)
       elif fid == 5:
-        if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 6:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -9817,12 +9783,8 @@ class TableStatsRequest:
       oprot.writeFieldBegin('catName', TType.STRING, 4)
       oprot.writeString(self.catName)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 5)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 5)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9844,7 +9806,6 @@ class TableStatsRequest:
     value = (value * 31) ^ hash(self.tblName)
     value = (value * 31) ^ hash(self.colNames)
     value = (value * 31) ^ hash(self.catName)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
 
@@ -9867,7 +9828,6 @@ class PartitionsStatsRequest:
    - colNames
    - partNames
    - catName
-   - txnId
    - validWriteIdList
   """
 
@@ -9878,17 +9838,15 @@ class PartitionsStatsRequest:
     (3, TType.LIST, 'colNames', (TType.STRING,None), None, ), # 3
     (4, TType.LIST, 'partNames', (TType.STRING,None), None, ), # 4
     (5, TType.STRING, 'catName', None, None, ), # 5
-    (6, TType.I64, 'txnId', None, -1, ), # 6
-    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
+    (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
   )
 
-  def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, txnId=thrift_spec[6][4], validWriteIdList=None,):
+  def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, validWriteIdList=None,):
     self.dbName = dbName
     self.tblName = tblName
     self.colNames = colNames
     self.partNames = partNames
     self.catName = catName
-    self.txnId = txnId
     self.validWriteIdList = validWriteIdList
 
   def read(self, iprot):
@@ -9936,11 +9894,6 @@ class PartitionsStatsRequest:
         else:
           iprot.skip(ftype)
       elif fid == 6:
-        if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -9981,12 +9934,8 @@ class PartitionsStatsRequest:
       oprot.writeFieldBegin('catName', TType.STRING, 5)
       oprot.writeString(self.catName)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 6)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10011,7 +9960,6 @@ class PartitionsStatsRequest:
     value = (value * 31) ^ hash(self.colNames)
     value = (value * 31) ^ hash(self.partNames)
     value = (value * 31) ^ hash(self.catName)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
 
@@ -10122,7 +10070,6 @@ class AddPartitionsRequest:
    - ifNotExists
    - needResult
    - catName
-   - txnId
    - validWriteIdList
   """
 
@@ -10134,18 +10081,16 @@ class AddPartitionsRequest:
     (4, TType.BOOL, 'ifNotExists', None, None, ), # 4
     (5, TType.BOOL, 'needResult', None, True, ), # 5
     (6, TType.STRING, 'catName', None, None, ), # 6
-    (7, TType.I64, 'txnId', None, -1, ), # 7
-    (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
+    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
   )
 
-  def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, txnId=thrift_spec[7][4], validWriteIdList=None,):
+  def __init__(self, dbName=None, tblName=None, parts=None, ifNotExists=None, needResult=thrift_spec[5][4], catName=None, validWriteIdList=None,):
     self.dbName = dbName
     self.tblName = tblName
     self.parts = parts
     self.ifNotExists = ifNotExists
     self.needResult = needResult
     self.catName = catName
-    self.txnId = txnId
     self.validWriteIdList = validWriteIdList
 
   def read(self, iprot):
@@ -10194,11 +10139,6 @@ class AddPartitionsRequest:
         else:
           iprot.skip(ftype)
       elif fid == 7:
-        if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 8:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -10240,12 +10180,8 @@ class AddPartitionsRequest:
       oprot.writeFieldBegin('catName', TType.STRING, 6)
       oprot.writeString(self.catName)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 7)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10271,7 +10207,6 @@ class AddPartitionsRequest:
     value = (value * 31) ^ hash(self.ifNotExists)
     value = (value * 31) ^ hash(self.needResult)
     value = (value * 31) ^ hash(self.catName)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
 
@@ -17526,7 +17461,6 @@ class GetTableRequest:
    - tblName
    - capabilities
    - catName
-   - txnId
    - validWriteIdList
   """
 
@@ -17536,16 +17470,15 @@ class GetTableRequest:
     (2, TType.STRING, 'tblName', None, None, ), # 2
     (3, TType.STRUCT, 'capabilities', (ClientCapabilities, ClientCapabilities.thrift_spec), None, ), # 3
     (4, TType.STRING, 'catName', None, None, ), # 4
-    (5, TType.I64, 'txnId', None, -1, ), # 5
+    None, # 5
     (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
   )
 
-  def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, txnId=thrift_spec[5][4], validWriteIdList=None,):
+  def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None,):
     self.dbName = dbName
     self.tblName = tblName
     self.capabilities = capabilities
     self.catName = catName
-    self.txnId = txnId
     self.validWriteIdList = validWriteIdList
 
   def read(self, iprot):
@@ -17578,11 +17511,6 @@ class GetTableRequest:
           self.catName = iprot.readString()
         else:
           iprot.skip(ftype)
-      elif fid == 5:
-        if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
@@ -17614,10 +17542,6 @@ class GetTableRequest:
       oprot.writeFieldBegin('catName', TType.STRING, 4)
       oprot.writeString(self.catName)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 5)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
       oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
       oprot.writeString(self.validWriteIdList)
@@ -17639,7 +17563,6 @@ class GetTableRequest:
     value = (value * 31) ^ hash(self.tblName)
     value = (value * 31) ^ hash(self.capabilities)
     value = (value * 31) ^ hash(self.catName)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
 
@@ -22583,7 +22506,6 @@ class AlterPartitionsRequest:
    - tableName
    - partitions
    - environmentContext
-   - txnId
    - writeId
    - validWriteIdList
   """
@@ -22595,18 +22517,16 @@ class AlterPartitionsRequest:
     (3, TType.STRING, 'tableName', None, None, ), # 3
     (4, TType.LIST, 'partitions', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 4
     (5, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 5
-    (6, TType.I64, 'txnId', None, -1, ), # 6
-    (7, TType.I64, 'writeId', None, -1, ), # 7
-    (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
+    (6, TType.I64, 'writeId', None, -1, ), # 6
+    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
   )
 
-  def __init__(self, catName=None, dbName=None, tableName=None, partitions=None, environmentContext=None, txnId=thrift_spec[6][4], writeId=thrift_spec[7][4], validWriteIdList=None,):
+  def __init__(self, catName=None, dbName=None, tableName=None, partitions=None, environmentContext=None, writeId=thrift_spec[6][4], validWriteIdList=None,):
     self.catName = catName
     self.dbName = dbName
     self.tableName = tableName
     self.partitions = partitions
     self.environmentContext = environmentContext
-    self.txnId = txnId
     self.writeId = writeId
     self.validWriteIdList = validWriteIdList
 
@@ -22653,15 +22573,10 @@ class AlterPartitionsRequest:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
-        if ftype == TType.I64:
           self.writeId = iprot.readI64()
         else:
           iprot.skip(ftype)
-      elif fid == 8:
+      elif fid == 7:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -22699,16 +22614,12 @@ class AlterPartitionsRequest:
       oprot.writeFieldBegin('environmentContext', TType.STRUCT, 5)
       self.environmentContext.write(oprot)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 6)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 7)
+      oprot.writeFieldBegin('writeId', TType.I64, 6)
       oprot.writeI64(self.writeId)
       oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22731,7 +22642,6 @@ class AlterPartitionsRequest:
     value = (value * 31) ^ hash(self.tableName)
     value = (value * 31) ^ hash(self.partitions)
     value = (value * 31) ^ hash(self.environmentContext)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
@@ -22801,7 +22711,6 @@ class RenamePartitionRequest:
    - tableName
    - partVals
    - newPart
-   - txnId
    - validWriteIdList
   """
 
@@ -22812,17 +22721,15 @@ class RenamePartitionRequest:
     (3, TType.STRING, 'tableName', None, None, ), # 3
     (4, TType.LIST, 'partVals', (TType.STRING,None), None, ), # 4
     (5, TType.STRUCT, 'newPart', (Partition, Partition.thrift_spec), None, ), # 5
-    (6, TType.I64, 'txnId', None, -1, ), # 6
-    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
+    (6, TType.STRING, 'validWriteIdList', None, None, ), # 6
   )
 
-  def __init__(self, catName=None, dbName=None, tableName=None, partVals=None, newPart=None, txnId=thrift_spec[6][4], validWriteIdList=None,):
+  def __init__(self, catName=None, dbName=None, tableName=None, partVals=None, newPart=None, validWriteIdList=None,):
     self.catName = catName
     self.dbName = dbName
     self.tableName = tableName
     self.partVals = partVals
     self.newPart = newPart
-    self.txnId = txnId
     self.validWriteIdList = validWriteIdList
 
   def read(self, iprot):
@@ -22866,11 +22773,6 @@ class RenamePartitionRequest:
         else:
           iprot.skip(ftype)
       elif fid == 6:
-        if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -22908,12 +22810,8 @@ class RenamePartitionRequest:
       oprot.writeFieldBegin('newPart', TType.STRUCT, 5)
       self.newPart.write(oprot)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 6)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 6)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -22938,7 +22836,6 @@ class RenamePartitionRequest:
     value = (value * 31) ^ hash(self.tableName)
     value = (value * 31) ^ hash(self.partVals)
     value = (value * 31) ^ hash(self.newPart)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value
 
@@ -23007,7 +22904,6 @@ class AlterTableRequest:
    - tableName
    - table
    - environmentContext
-   - txnId
    - writeId
    - validWriteIdList
   """
@@ -23019,18 +22915,16 @@ class AlterTableRequest:
     (3, TType.STRING, 'tableName', None, None, ), # 3
     (4, TType.STRUCT, 'table', (Table, Table.thrift_spec), None, ), # 4
     (5, TType.STRUCT, 'environmentContext', (EnvironmentContext, EnvironmentContext.thrift_spec), None, ), # 5
-    (6, TType.I64, 'txnId', None, -1, ), # 6
-    (7, TType.I64, 'writeId', None, -1, ), # 7
-    (8, TType.STRING, 'validWriteIdList', None, None, ), # 8
+    (6, TType.I64, 'writeId', None, -1, ), # 6
+    (7, TType.STRING, 'validWriteIdList', None, None, ), # 7
   )
 
-  def __init__(self, catName=None, dbName=None, tableName=None, table=None, environmentContext=None, txnId=thrift_spec[6][4], writeId=thrift_spec[7][4], validWriteIdList=None,):
+  def __init__(self, catName=None, dbName=None, tableName=None, table=None, environmentContext=None, writeId=thrift_spec[6][4], validWriteIdList=None,):
     self.catName = catName
     self.dbName = dbName
     self.tableName = tableName
     self.table = table
     self.environmentContext = environmentContext
-    self.txnId = txnId
     self.writeId = writeId
     self.validWriteIdList = validWriteIdList
 
@@ -23072,15 +22966,10 @@ class AlterTableRequest:
           iprot.skip(ftype)
       elif fid == 6:
         if ftype == TType.I64:
-          self.txnId = iprot.readI64()
-        else:
-          iprot.skip(ftype)
-      elif fid == 7:
-        if ftype == TType.I64:
           self.writeId = iprot.readI64()
         else:
           iprot.skip(ftype)
-      elif fid == 8:
+      elif fid == 7:
         if ftype == TType.STRING:
           self.validWriteIdList = iprot.readString()
         else:
@@ -23115,16 +23004,12 @@ class AlterTableRequest:
       oprot.writeFieldBegin('environmentContext', TType.STRUCT, 5)
       self.environmentContext.write(oprot)
       oprot.writeFieldEnd()
-    if self.txnId is not None:
-      oprot.writeFieldBegin('txnId', TType.I64, 6)
-      oprot.writeI64(self.txnId)
-      oprot.writeFieldEnd()
     if self.writeId is not None:
-      oprot.writeFieldBegin('writeId', TType.I64, 7)
+      oprot.writeFieldBegin('writeId', TType.I64, 6)
       oprot.writeI64(self.writeId)
       oprot.writeFieldEnd()
     if self.validWriteIdList is not None:
-      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 8)
+      oprot.writeFieldBegin('validWriteIdList', TType.STRING, 7)
       oprot.writeString(self.validWriteIdList)
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -23147,7 +23032,6 @@ class AlterTableRequest:
     value = (value * 31) ^ hash(self.tableName)
     value = (value * 31) ^ hash(self.table)
     value = (value * 31) ^ hash(self.environmentContext)
-    value = (value * 31) ^ hash(self.txnId)
     value = (value * 31) ^ hash(self.writeId)
     value = (value * 31) ^ hash(self.validWriteIdList)
     return value

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 2e2392d..e0c6c02 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -608,15 +608,13 @@ class TruncateTableRequest
   DBNAME = 1
   TABLENAME = 2
   PARTNAMES = 3
-  TXNID = 4
-  WRITEID = 5
-  VALIDWRITEIDLIST = 6
+  WRITEID = 4
+  VALIDWRITEIDLIST = 5
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
     PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
@@ -1646,14 +1644,12 @@ class SetPartitionsStatsRequest
   include ::Thrift::Struct, ::Thrift::Struct_Union
   COLSTATS = 1
   NEEDMERGE = 2
-  TXNID = 3
-  WRITEID = 4
-  VALIDWRITEIDLIST = 5
+  WRITEID = 3
+  VALIDWRITEIDLIST = 4
 
   FIELDS = {
     COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}},
     NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
@@ -2178,15 +2174,13 @@ class TableStatsRequest
   TBLNAME = 2
   COLNAMES = 3
   CATNAME = 4
-  TXNID = 5
-  VALIDWRITEIDLIST = 6
+  VALIDWRITEIDLIST = 5
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
 
@@ -2208,8 +2202,7 @@ class PartitionsStatsRequest
   COLNAMES = 3
   PARTNAMES = 4
   CATNAME = 5
-  TXNID = 6
-  VALIDWRITEIDLIST = 7
+  VALIDWRITEIDLIST = 6
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -2217,7 +2210,6 @@ class PartitionsStatsRequest
     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
     PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}},
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
 
@@ -2259,8 +2251,7 @@ class AddPartitionsRequest
   IFNOTEXISTS = 4
   NEEDRESULT = 5
   CATNAME = 6
-  TXNID = 7
-  VALIDWRITEIDLIST = 8
+  VALIDWRITEIDLIST = 7
 
   FIELDS = {
     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
@@ -2269,7 +2260,6 @@ class AddPartitionsRequest
     IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'},
     NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
 
@@ -3915,7 +3905,6 @@ class GetTableRequest
   TBLNAME = 2
   CAPABILITIES = 3
   CATNAME = 4
-  TXNID = 5
   VALIDWRITEIDLIST = 6
 
   FIELDS = {
@@ -3923,7 +3912,6 @@ class GetTableRequest
     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
     CAPABILITIES => {:type => ::Thrift::Types::STRUCT, :name => 'capabilities', :class => ::ClientCapabilities, :optional => true},
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
 
@@ -5114,9 +5102,8 @@ class AlterPartitionsRequest
   TABLENAME = 3
   PARTITIONS = 4
   ENVIRONMENTCONTEXT = 5
-  TXNID = 6
-  WRITEID = 7
-  VALIDWRITEIDLIST = 8
+  WRITEID = 6
+  VALIDWRITEIDLIST = 7
 
   FIELDS = {
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
@@ -5124,7 +5111,6 @@ class AlterPartitionsRequest
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
     ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
@@ -5162,8 +5148,7 @@ class RenamePartitionRequest
   TABLENAME = 3
   PARTVALS = 4
   NEWPART = 5
-  TXNID = 6
-  VALIDWRITEIDLIST = 7
+  VALIDWRITEIDLIST = 6
 
   FIELDS = {
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
@@ -5171,7 +5156,6 @@ class RenamePartitionRequest
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
     PARTVALS => {:type => ::Thrift::Types::LIST, :name => 'partVals', :element => {:type => ::Thrift::Types::STRING}},
     NEWPART => {:type => ::Thrift::Types::STRUCT, :name => 'newPart', :class => ::Partition},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }
 
@@ -5209,9 +5193,8 @@ class AlterTableRequest
   TABLENAME = 3
   TABLE = 4
   ENVIRONMENTCONTEXT = 5
-  TXNID = 6
-  WRITEID = 7
-  VALIDWRITEIDLIST = 8
+  WRITEID = 6
+  VALIDWRITEIDLIST = 7
 
   FIELDS = {
     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
@@ -5219,7 +5202,6 @@ class AlterTableRequest
     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
     TABLE => {:type => ::Thrift::Types::STRUCT, :name => 'table', :class => ::Table},
     ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
-    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
     WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
     VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 4cfa6a1..8a4bdd8 100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -282,9 +282,8 @@ struct TruncateTableRequest {
   1: required string dbName,
   2: required string tableName,
   3: optional list<string> partNames,
-  4: optional i64 txnId=-1,
-  5: optional i64 writeId=-1,
-  6: optional string validWriteIdList
+  4: optional i64 writeId=-1,
+  5: optional string validWriteIdList
 }
 
 struct TruncateTableResponse {
@@ -596,9 +595,8 @@ struct AggrStats {
 struct SetPartitionsStatsRequest {
 1: required list<ColumnStatistics> colStats,
 2: optional bool needMerge, //stats need to be merged with the existing stats
-3: optional i64 txnId=-1,   // transaction id of the query that sends this structure
-4: optional i64 writeId=-1,         // writeId for the current query that updates the stats
-5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
+3: optional i64 writeId=-1,         // writeId for the current query that updates the stats
+4: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
 }
 
 struct SetPartitionsStatsResponse {
@@ -745,8 +743,7 @@ struct TableStatsRequest {
  2: required string tblName,
  3: required list<string> colNames
  4: optional string catName,
- 5: optional i64 txnId=-1,            // transaction id of the query that sends this structure
- 6: optional string validWriteIdList  // valid write id list for the table for which this struct is being sent
+ 5: optional string validWriteIdList  // valid write id list for the table for which this struct is being sent
 }
 
 struct PartitionsStatsRequest {
@@ -755,8 +752,7 @@ struct PartitionsStatsRequest {
  3: required list<string> colNames,
  4: required list<string> partNames,
  5: optional string catName,
- 6: optional i64 txnId=-1,           // transaction id of the query that sends this structure
- 7: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
+ 6: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
 }
 
 // Return type for add_partitions_req
@@ -773,8 +769,7 @@ struct AddPartitionsRequest {
   4: required bool ifNotExists,
   5: optional bool needResult=true,
   6: optional string catName,
-  7: optional i64 txnId=-1,
-  8: optional string validWriteIdList
+  7: optional string validWriteIdList
 }
 
 // Return type for drop_partitions_req
@@ -1275,7 +1270,6 @@ struct GetTableRequest {
   2: required string tblName,
   3: optional ClientCapabilities capabilities,
   4: optional string catName,
-  5: optional i64 txnId=-1,
   6: optional string validWriteIdList
 }
 
@@ -1615,9 +1609,8 @@ struct AlterPartitionsRequest {
   3: required string tableName,
   4: required list<Partition> partitions,
   5: optional EnvironmentContext environmentContext,
-  6: optional i64 txnId=-1,
-  7: optional i64 writeId=-1,
-  8: optional string validWriteIdList
+  6: optional i64 writeId=-1,
+  7: optional string validWriteIdList
 }
 
 struct AlterPartitionsResponse {
@@ -1629,8 +1622,7 @@ struct RenamePartitionRequest {
   3: required string tableName,
   4: required list<string> partVals,
   5: required Partition newPart,
-  6: optional i64 txnId=-1,
-  7: optional string validWriteIdList
+  6: optional string validWriteIdList
 }
 
 struct RenamePartitionResponse {
@@ -1642,9 +1634,8 @@ struct AlterTableRequest {
   3: required string tableName,
   4: required Table table,
   5: optional EnvironmentContext environmentContext,
-  6: optional i64 txnId=-1,
-  7: optional i64 writeId=-1,
-  8: optional string validWriteIdList
+  6: optional i64 writeId=-1,
+  7: optional string validWriteIdList
 // TODO: also add cascade here, out of envCtx
 }
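
The struct changes above are the wire-level core of the patch: txnId disappears from
TruncateTableRequest, SetPartitionsStatsRequest, TableStatsRequest, PartitionsStatsRequest,
AddPartitionsRequest, GetTableRequest, AlterPartitionsRequest, RenamePartitionRequest and
AlterTableRequest, leaving writeId and validWriteIdList to carry the transactional context.
A minimal client-side sketch (not part of the patch) of the trimmed AlterTableRequest
follows; it assumes the generated structs live in the usual
org.apache.hadoop.hive.metastore.api package and expose the standard Thrift bean setters
for the fields shown above (the txnId setter simply no longer exists), and the write-id
values are placeholders:

  import org.apache.hadoop.hive.metastore.api.AlterTableRequest;
  import org.apache.hadoop.hive.metastore.api.Table;

  public class AlterTableRequestSketch {
    // Builds the request with only writeId + validWriteIdList carrying the
    // transactional snapshot; callers that used to set txnId just drop that call.
    public static AlterTableRequest build(Table newTable, long writeId,
        String validWriteIdList) {
      AlterTableRequest req = new AlterTableRequest();
      req.setDbName(newTable.getDbName());
      req.setTableName(newTable.getTableName());
      req.setTable(newTable);
      if (newTable.isSetCatName()) {
        req.setCatName(newTable.getCatName());
      }
      req.setWriteId(writeId);                   // now field 6
      req.setValidWriteIdList(validWriteIdList); // now field 7
      return req;
    }
  }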
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index f3dc264..f4e0c41 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -61,7 +61,7 @@ public interface AlterHandler extends Configurable {
   default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
     String name, Table newTable, EnvironmentContext envContext)
       throws InvalidOperationException, MetaException {
-    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, -1, null);
+    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, null);
   }
 
   /**
@@ -88,7 +88,7 @@ public interface AlterHandler extends Configurable {
    */
   void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
       String name, Table newTable, EnvironmentContext envContext,
-      IHMSHandler handler, long txnId, String writeIdList)
+      IHMSHandler handler,  String writeIdList)
           throws InvalidOperationException, MetaException;
 
   /**
@@ -146,7 +146,7 @@ public interface AlterHandler extends Configurable {
   Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
                            final String dbname, final String name, final List<String> part_vals,
                            final Partition new_part, EnvironmentContext environmentContext,
-                           IHMSHandler handler, long txnId, String validWriteIds)
+                           IHMSHandler handler,  String validWriteIds)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
 
   /**
@@ -198,7 +198,7 @@ public interface AlterHandler extends Configurable {
    */
   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
     final String dbname, final String name, final List<Partition> new_parts,
-    EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId,
+    EnvironmentContext environmentContext,  String writeIdList, long writeId,
     IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
 }
\ No newline at end of file
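
Since the AlterHandler interface now takes only the ValidWriteIdList string where it
previously took a (txnId, writeIdList) pair, every caller shrinks by one argument. A
hedged caller sketch, not part of the patch, placed in the handler's package so the
collaborator types resolve without extra imports; all values are placeholders supplied
by the surrounding metastore code:

  package org.apache.hadoop.hive.metastore;

  import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
  import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.metastore.api.Table;

  public class AlterHandlerCallerSketch {
    // 'name' is the pre-alter table name; newTable may carry a new name for renames.
    static void alterWithWriteIds(AlterHandler alterHandler, RawStore ms, Warehouse wh,
        IHMSHandler handler, String catName, String dbName, String name, Table newTable,
        EnvironmentContext envContext, String validWriteIdList)
        throws InvalidOperationException, MetaException {
      // Old shape: alterTable(ms, wh, ..., envContext, handler, txnId, writeIdList).
      // New shape drops the long txnId and keeps only the write-id list:
      alterHandler.alterTable(ms, wh, catName, dbName, name, newTable, envContext,
          handler, validWriteIdList);
    }
  }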

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 719f001..e16bf7a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -90,7 +90,7 @@ public class HiveAlterHandler implements AlterHandler {
   @Override
   public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
       String name, Table newt, EnvironmentContext environmentContext,
-      IHMSHandler handler, long txnId, String writeIdList)
+      IHMSHandler handler, String writeIdList)
           throws InvalidOperationException, MetaException {
     catName = normalizeIdentifier(catName);
     name = name.toLowerCase();
@@ -142,7 +142,7 @@ public class HiveAlterHandler implements AlterHandler {
 
       // check if table with the new name already exists
       if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
-        if (msdb.getTable(catName, newDbName, newTblName,  -1, null) != null) {
+        if (msdb.getTable(catName, newDbName, newTblName, null) != null) {
           throw new InvalidOperationException("new table " + newDbName
               + "." + newTblName + " already exists");
         }
@@ -152,7 +152,7 @@ public class HiveAlterHandler implements AlterHandler {
       msdb.openTransaction();
       // get old table
       // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
-      oldt = msdb.getTable(catName, dbname, name, -1, null);
+      oldt = msdb.getTable(catName, dbname, name, null);
       if (oldt == null) {
         throw new InvalidOperationException("table " +
             TableName.getQualified(catName, dbname, name) + " doesn't exist");
@@ -278,7 +278,7 @@ public class HiveAlterHandler implements AlterHandler {
             }
           }
           // Do not verify stats parameters on a partitioned table.
-          msdb.alterTable(catName, dbname, name, newt, -1, null);
+          msdb.alterTable(catName, dbname, name, newt, null);
           // alterPartition is only for changing the partition location in the table rename
           if (dataWasMoved) {
 
@@ -297,7 +297,7 @@ public class HiveAlterHandler implements AlterHandler {
                 partValues.add(part.getValues());
               }
               msdb.alterPartitions(catName, newDbName, newTblName, partValues,
-                  partBatch, newt.getWriteId(), txnId, writeIdList);
+                  partBatch, newt.getWriteId(), writeIdList);
             }
           }
 
@@ -306,11 +306,11 @@ public class HiveAlterHandler implements AlterHandler {
             newPartColStats.getStatsDesc().setDbName(newDbName);
             newPartColStats.getStatsDesc().setTableName(newTblName);
             msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues(),
-                txnId, writeIdList, newt.getWriteId());
+                writeIdList, newt.getWriteId());
           }
         } else {
           alterTableUpdateTableColumnStats(
-              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+              msdb, oldt, newt, environmentContext, writeIdList);
         }
       } else {
         // operations other than table rename
@@ -334,24 +334,24 @@ public class HiveAlterHandler implements AlterHandler {
               assert(colStats == null);
               if (cascade) {
                 msdb.alterPartition(
-                    catName, dbname, name, part.getValues(), part, txnId, writeIdList);
+                    catName, dbname, name, part.getValues(), part, writeIdList);
               } else {
                 // update changed properties (stats)
                 oldPart.setParameters(part.getParameters());
                 msdb.alterPartition(
-                    catName, dbname, name, part.getValues(), oldPart, txnId, writeIdList);
+                    catName, dbname, name, part.getValues(), oldPart, writeIdList);
               }
             }
             // Don't validate table-level stats for a partitoned table.
-            msdb.alterTable(catName, dbname, name, newt, -1, null);
+            msdb.alterTable(catName, dbname, name, newt, null);
           } else {
             LOG.warn("Alter table not cascaded to partitions.");
             alterTableUpdateTableColumnStats(
-                msdb, oldt, newt, environmentContext, txnId, writeIdList);
+                msdb, oldt, newt, environmentContext, writeIdList);
           }
         } else {
           alterTableUpdateTableColumnStats(
-              msdb, oldt, newt, environmentContext, txnId, writeIdList);
+              msdb, oldt, newt, environmentContext, writeIdList);
         }
       }
 
@@ -431,13 +431,13 @@ public class HiveAlterHandler implements AlterHandler {
     EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part,
-        environmentContext, null, -1, null);
+        environmentContext, null, null);
   }
 
   @Override
   public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, String dbname,
       String name, List<String> part_vals, final Partition new_part,
-      EnvironmentContext environmentContext, IHMSHandler handler, long txnId, String validWriteIds)
+      EnvironmentContext environmentContext, IHMSHandler handler, String validWriteIds)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     boolean success = false;
     Partition oldPart;
@@ -459,7 +459,7 @@ public class HiveAlterHandler implements AlterHandler {
       try {
         msdb.openTransaction();
 
-        Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+        Table tbl = msdb.getTable(catName, dbname, name, null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to alter partition because table or database does not exist.");
@@ -481,7 +481,7 @@ public class HiveAlterHandler implements AlterHandler {
               oldPart.getSd().getCols(), tbl, new_part, null);
         }
         msdb.alterPartition(
-            catName, dbname, name, new_part.getValues(), new_part, txnId, validWriteIds);
+            catName, dbname, name, new_part.getValues(), new_part, validWriteIds);
         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
                                                 EventMessage.EventType.ALTER_PARTITION,
@@ -516,7 +516,7 @@ public class HiveAlterHandler implements AlterHandler {
     Database db;
     try {
       msdb.openTransaction();
-      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name,  -1, null);
+      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, null);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to alter partition because table or database does not exist.");
@@ -617,12 +617,12 @@ public class HiveAlterHandler implements AlterHandler {
       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
       ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
           oldPart.getSd().getCols(), tbl, new_part, null);
-      msdb.alterPartition(catName, dbname, name, part_vals, new_part, txnId, validWriteIds);
+      msdb.alterPartition(catName, dbname, name, part_vals, new_part, validWriteIds);
       if (cs != null) {
         cs.getStatsDesc().setPartName(newPartName);
         try {
           msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
-              txnId, validWriteIds, new_part.getWriteId());
+              validWriteIds, new_part.getWriteId());
         } catch (InvalidInputException iie) {
           throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
         } catch (NoSuchObjectException nsoe) {
@@ -668,7 +668,7 @@ public class HiveAlterHandler implements AlterHandler {
     EnvironmentContext environmentContext)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
-        environmentContext, -1, null, -1, null);
+        environmentContext, null, -1, null);
   }
 
   @Override
@@ -676,7 +676,7 @@ public class HiveAlterHandler implements AlterHandler {
                                          final String dbname, final String name,
                                          final List<Partition> new_parts,
                                          EnvironmentContext environmentContext,
-                                         long txnId, String writeIdList, long writeId,
+                                         String writeIdList, long writeId,
                                          IHMSHandler handler)
       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
     List<Partition> oldParts = new ArrayList<>();
@@ -691,7 +691,7 @@ public class HiveAlterHandler implements AlterHandler {
       msdb.openTransaction();
 
       // Note: should we pass in write ID here? We only update stats on parts so probably not.
-      Table tbl = msdb.getTable(catName, dbname, name,  -1, null);
+      Table tbl = msdb.getTable(catName, dbname, name, null);
       if (tbl == null) {
         throw new InvalidObjectException(
             "Unable to alter partitions because table or database does not exist.");
@@ -726,7 +726,7 @@ public class HiveAlterHandler implements AlterHandler {
         }
       }
 
-      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, txnId, writeIdList);
+      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, writeIdList);
       Iterator<Partition> oldPartsIt = oldParts.iterator();
       for (Partition newPart : new_parts) {
         Partition oldPart;
@@ -795,7 +795,7 @@ public class HiveAlterHandler implements AlterHandler {
 
   @VisibleForTesting
   void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable,
-      EnvironmentContext ec, long txnId, String validWriteIds)
+      EnvironmentContext ec, String validWriteIds)
       throws MetaException, InvalidObjectException {
     String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() :
         getDefaultCatalog(conf));
@@ -854,13 +854,13 @@ public class HiveAlterHandler implements AlterHandler {
       }
 
       // Change to new table and append stats for the new table
-      msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
+      msdb.alterTable(catName, dbName, tableName, newTable, validWriteIds);
       if (updateColumnStats && !newStatsObjs.isEmpty()) {
         ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
         statsDesc.setDbName(newDbName);
         statsDesc.setTableName(newTableName);
         colStats.setStatsObj(newStatsObjs);
-        msdb.updateTableColumnStatistics(colStats, txnId, validWriteIds, newTable.getWriteId());
+        msdb.updateTableColumnStatistics(colStats, validWriteIds, newTable.getWriteId());
       }
     } catch (NoSuchObjectException nsoe) {
       LOG.debug("Could not find db entry." + nsoe);

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 285f7fb..eb4eb1b 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -2435,7 +2435,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     private boolean is_table_exists(RawStore ms, String catName, String dbname, String name)
         throws MetaException {
-      return (ms.getTable(catName, dbname, name, -1, null) != null);
+      return (ms.getTable(catName, dbname, name, null) != null);
     }
 
     private boolean drop_table_core(final RawStore ms, final String catName, final String dbname,
@@ -2694,7 +2694,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private void alterPartitionForTruncate(RawStore ms, String catName, String dbName, String tableName,
-        Table table, Partition partition, long txnId, String validWriteIds, long writeId) throws Exception {
+        Table table, Partition partition, String validWriteIds, long writeId) throws Exception {
       EnvironmentContext environmentContext = new EnvironmentContext();
       updateStatsForTruncate(partition.getParameters(), environmentContext);
 
@@ -2714,17 +2714,17 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         partition.setWriteId(writeId);
       }
       alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition,
-          environmentContext, this, txnId, validWriteIds);
+          environmentContext, this, validWriteIds);
     }
 
     private void alterTableStatsForTruncate(RawStore ms, String catName, String dbName,
         String tableName, Table table, List<String> partNames,
-        long txnId, String validWriteIds, long writeId) throws Exception {
+        String validWriteIds, long writeId) throws Exception {
       if (partNames == null) {
         if (0 != table.getPartitionKeysSize()) {
           for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
             alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
-                txnId, validWriteIds, writeId);
+                validWriteIds, writeId);
           }
         } else {
           EnvironmentContext environmentContext = new EnvironmentContext();
@@ -2747,12 +2747,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             table.setWriteId(writeId);
           }
           alterHandler.alterTable(ms, wh, catName, dbName, tableName, table,
-              environmentContext, this, txnId, validWriteIds);
+              environmentContext, this, validWriteIds);
         }
       } else {
         for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
           alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,
-              txnId, validWriteIds, writeId);
+              validWriteIds, writeId);
         }
       }
       return;
@@ -2791,19 +2791,19 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     public void truncate_table(final String dbName, final String tableName, List<String> partNames)
       throws NoSuchObjectException, MetaException {
       // Deprecated path, won't work for txn tables.
-      truncateTableInternal(dbName, tableName, partNames, -1, null, -1);
+      truncateTableInternal(dbName, tableName, partNames, null, -1);
     }
 
     @Override
     public TruncateTableResponse truncate_table_req(TruncateTableRequest req)
         throws MetaException, TException {
       truncateTableInternal(req.getDbName(), req.getTableName(), req.getPartNames(),
-          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+          req.getValidWriteIdList(), req.getWriteId());
       return new TruncateTableResponse();
     }
 
     private void truncateTableInternal(String dbName, String tableName, List<String> partNames,
-        long txnId, String validWriteIds, long writeId) throws MetaException, NoSuchObjectException {
+        String validWriteIds, long writeId) throws MetaException, NoSuchObjectException {
       try {
         String[] parsedDbName = parseDbName(dbName, conf);
         Table tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName);
@@ -2835,7 +2835,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         // Alter the table/partition stats and also notify truncate table event
         alterTableStatsForTruncate(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-            tableName, tbl, partNames, txnId, validWriteIds, writeId);
+            tableName, tbl, partNames, validWriteIds, writeId);
       } catch (IOException e) {
         throw new MetaException(e.getMessage());
       } catch (MetaException | NoSuchObjectException e) {
@@ -2866,7 +2866,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         NoSuchObjectException {
       String[] parsedDbName = parseDbName(dbname, conf);
       return getTableInternal(
-          parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, -1, null);
+          parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, null, null);
     }
 
     @Override
@@ -2874,11 +2874,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         NoSuchObjectException {
       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
       return new GetTableResult(getTableInternal(catName, req.getDbName(), req.getTblName(),
-          req.getCapabilities(), req.getTxnId(), req.getValidWriteIdList()));
+          req.getCapabilities(), req.getValidWriteIdList()));
     }
 
     private Table getTableInternal(String catName, String dbname, String name,
-        ClientCapabilities capabilities, long txnId, String writeIdList)
+        ClientCapabilities capabilities, String writeIdList)
         throws MetaException, NoSuchObjectException {
       if (isInTest) {
         assertClientHasCapability(capabilities, ClientCapability.TEST_CAPABILITY,
@@ -2889,7 +2889,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       startTableFunction("get_table", catName, dbname, name);
       Exception ex = null;
       try {
-        t = get_table_core(catName, dbname, name, txnId, writeIdList);
+        t = get_table_core(catName, dbname, name, writeIdList);
         if (MetaStoreUtils.isInsertOnlyTableParam(t.getParameters())) {
           assertClientHasCapability(capabilities, ClientCapability.INSERT_ONLY_TABLES,
               "insert-only tables", "get_table_req");
@@ -2929,7 +2929,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String dbname,
         final String name)
         throws MetaException, NoSuchObjectException {
-      return get_table_core(catName, dbname, name, -1, null);
+      return get_table_core(catName, dbname, name, null);
     }
 
     @Override
@@ -2937,12 +2937,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         final String catName,
         final String dbname,
         final String name,
-        final long txnId,
         final String writeIdList)
         throws MetaException, NoSuchObjectException {
       Table t = null;
       try {
-        t = getMS().getTable(catName, dbname, name, txnId, writeIdList);
+        t = getMS().getTable(catName, dbname, name, writeIdList);
         if (t == null) {
           throw new NoSuchObjectException(TableName.getQualified(catName, dbname, name) +
             " table not found");
@@ -3124,7 +3123,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
         MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern);
 
-        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
+        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to add partition because table or database do not exist");
@@ -3318,7 +3317,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       try {
         ms.openTransaction();
-        tbl = ms.getTable(catName, dbName, tblName, -1, null);
+        tbl = ms.getTable(catName, dbName, tblName, null);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
               + TableName.getQualified(catName, dbName, tblName) +
@@ -3674,7 +3673,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       Database db = null;
       try {
         ms.openTransaction();
-        tbl = ms.getTable(catName, dbName, tblName, -1, null);
+        tbl = ms.getTable(catName, dbName, tblName, null);
         if (tbl == null) {
           throw new InvalidObjectException("Unable to add partitions because "
               + "database or table " + dbName + "." + tblName + " does not exist");
@@ -3844,7 +3843,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
       try {
         ms.openTransaction();
-        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), -1, null);
+        tbl = ms.getTable(part.getCatName(), part.getDbName(), part.getTableName(), null);
         if (tbl == null) {
           throw new InvalidObjectException(
               "Unable to add partition because table or database do not exist");
@@ -3966,7 +3965,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       Table destinationTable =
           ms.getTable(
-              parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, -1, null);
+              parsedDestDbName[CAT_NAME], parsedDestDbName[DB_NAME], destTableName, null);
       if (destinationTable == null) {
         throw new MetaException( "The destination table " +
             TableName.getQualified(parsedDestDbName[CAT_NAME],
@@ -3974,7 +3973,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
       Table sourceTable =
           ms.getTable(
-              parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, -1, null);
+              parsedSourceDbName[CAT_NAME], parsedSourceDbName[DB_NAME], sourceTableName, null);
       if (sourceTable == null) {
         throw new MetaException("The source table " +
             TableName.getQualified(parsedSourceDbName[CAT_NAME],
@@ -4151,7 +4150,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         ms.openTransaction();
         part = ms.getPartition(catName, db_name, tbl_name, part_vals);
-        tbl = get_table_core(catName, db_name, tbl_name, -1, null);
+        tbl = get_table_core(catName, db_name, tbl_name, null);
         tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData);
         firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this));
         mustPurge = isMustPurge(envContext, tbl);
@@ -4826,7 +4825,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       String[] parsedDbName = parseDbName(dbName, conf);
       // TODO: this method name is confusing, it actually does full alter (sortof)
       rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition,
-          envContext, -1, null);
+          envContext, null);
     }
 
     @Deprecated
@@ -4837,18 +4836,18 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       // Call rename_partition without an environment context.
       String[] parsedDbName = parseDbName(db_name, conf);
       rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part,
-          null, -1, null);
+          null, null);
     }
 
     public RenamePartitionResponse rename_partition_req(
         RenamePartitionRequest req) throws InvalidOperationException ,MetaException ,TException {
       rename_partition(req.getCatName(), req.getDbName(), req.getTableName(), req.getPartVals(),
-          req.getNewPart(), null, req.getTxnId(), req.getValidWriteIdList());
+          req.getNewPart(), null, req.getValidWriteIdList());
       return new RenamePartitionResponse();
     };
 
     private void rename_partition(String catName, String db_name, String tbl_name,
-        List<String> part_vals, Partition new_part, EnvironmentContext envContext, long txnId,
+        List<String> part_vals, Partition new_part, EnvironmentContext envContext,
         String validWriteIds) throws TException {
       startTableFunction("alter_partition", catName, db_name, tbl_name);
 
@@ -4883,13 +4882,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name,
-            part_vals, new_part, envContext, this, txnId, validWriteIds);
+            part_vals, new_part, envContext, this, validWriteIds);
 
         // Only fetch the table if we actually have a listener
         Table table = null;
         if (!listeners.isEmpty()) {
           if (table == null) {
-            table = getMS().getTable(catName, db_name, tbl_name, -1, null);
+            table = getMS().getTable(catName, db_name, tbl_name, null);
           }
 
           MetaStoreListenerNotifier.notifyEvent(listeners,
@@ -4920,14 +4919,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throws TException {
       String[] o = parseDbName(db_name, conf);
       alter_partitions_with_environment_context(o[0], o[1],
-          tbl_name, new_parts, null, -1, null, -1);
+          tbl_name, new_parts, null, null, -1);
     }
 
     @Override
     public AlterPartitionsResponse alter_partitions_req(AlterPartitionsRequest req) throws TException {
       alter_partitions_with_environment_context(req.getCatName(),
           req.getDbName(), req.getTableName(), req.getPartitions(), req.getEnvironmentContext(),
-          req.isSetTxnId() ? req.getTxnId() : -1,
           req.isSetValidWriteIdList() ? req.getValidWriteIdList() : null,
           req.isSetWriteId() ? req.getWriteId() : -1);
       return new AlterPartitionsResponse();
@@ -4941,12 +4939,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             throws TException {
       String[] o = parseDbName(db_name, conf);
       alter_partitions_with_environment_context(o[0], o[1], tbl_name, new_parts, environmentContext,
-          -1, null, -1);
+          null, -1);
     }
 
     private void alter_partitions_with_environment_context(String catName, String db_name, final String tbl_name,
         final List<Partition> new_parts, EnvironmentContext environmentContext,
-        long txnId, String writeIdList, long writeId)
+        String writeIdList, long writeId)
         throws TException {
       if (environmentContext == null) {
         environmentContext = new EnvironmentContext();
@@ -4975,7 +4973,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this));
         }
         oldParts = alterHandler.alterPartitions(getMS(), wh,
-            catName, db_name, tbl_name, new_parts, environmentContext, txnId, writeIdList, writeId, this);
+            catName, db_name, tbl_name, new_parts, environmentContext, writeIdList, writeId, this);
         Iterator<Partition> olditr = oldParts.iterator();
         // Only fetch the table if we have a listener that needs it.
         Table table = null;
@@ -4989,7 +4987,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           }
 
           if (table == null) {
-            table = getMS().getTable(catName, db_name, tbl_name,  -1, null);
+            table = getMS().getTable(catName, db_name, tbl_name,  null);
           }
 
           if (!listeners.isEmpty()) {
@@ -5028,7 +5026,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       // Do not set an environment context.
       String[] parsedDbName = parseDbName(dbname, conf);
       alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable,
-          null, -1, null);
+          null, null);
     }
 
     @Override
@@ -5042,14 +5040,14 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
       String[] parsedDbName = parseDbName(dbname, conf);
       alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], name, newTable,
-          envContext, -1, null);
+          envContext, null);
     }
 
     @Override
     public AlterTableResponse alter_table_req(AlterTableRequest req)
         throws InvalidOperationException, MetaException, TException {
       alter_table_core(req.getCatName(), req.getDbName(), req.getTableName(),
-          req.getTable(), req.getEnvironmentContext(), req.getTxnId(), req.getValidWriteIdList());
+          req.getTable(), req.getEnvironmentContext(), req.getValidWriteIdList());
       return new AlterTableResponse();
     }
 
@@ -5060,11 +5058,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         throws InvalidOperationException, MetaException {
       String[] parsedDbName = parseDbName(dbname, conf);
       alter_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME],
-          name, newTable, envContext, -1, null);
+          name, newTable, envContext, null);
     }
 
     private void alter_table_core(String catName, String dbname, String name, Table newTable,
-        EnvironmentContext envContext, long txnId, String validWriteIdList)
+        EnvironmentContext envContext, String validWriteIdList)
         throws InvalidOperationException, MetaException {
       startFunction("alter_table", ": " + TableName.getQualified(catName, dbname, name)
           + " newtbl=" + newTable.getTableName());
@@ -5101,7 +5099,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         Table oldt = get_table_core(catName, dbname, name);
         firePreEvent(new PreAlterTableEvent(oldt, newTable, this));
         alterHandler.alterTable(getMS(), wh, catName, dbname, name, newTable,
-                envContext, this, txnId, validWriteIdList);
+                envContext, this, validWriteIdList);
         success = true;
       } catch (NoSuchObjectException e) {
         // thrown when the table to be altered does not exist
@@ -5426,7 +5424,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     private List<String> getPartValsFromName(RawStore ms, String catName, String dbName,
                                              String tblName, String partName)
         throws MetaException, InvalidObjectException {
-      Table t = ms.getTable(catName, dbName, tblName,  -1, null);
+      Table t = ms.getTable(catName, dbName, tblName,  null);
       if (t == null) {
         throw new InvalidObjectException(dbName + "." + tblName
             + " table not found");
@@ -5684,7 +5682,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         statsObj = getMS().getTableColumnStatistics(
             parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, Lists.newArrayList(colName),
-            -1, null);
+            null);
         if (statsObj != null) {
           assert statsObj.getStatsObjSize() <= 1;
         }
@@ -5710,7 +5708,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         ColumnStatistics cs = getMS().getTableColumnStatistics(
             catName, dbName, tblName, lowerCaseColNames,
-            request.getTxnId(), request.getValidWriteIdList());
+            request.getValidWriteIdList());
         // Note: stats compliance is not propagated to the client; instead, we just return nothing
         //       if stats are not compliant for now. This won't work for stats merging, but that
         //       is currently only done on metastore size (see set_aggr...).
@@ -5779,7 +5777,6 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       try {
         List<ColumnStatistics> stats = getMS().getPartitionColumnStatistics(
             catName, dbName, tblName, lowerCasePartNames, lowerCaseColNames,
-            request.isSetTxnId() ? request.getTxnId() : -1,
             request.isSetValidWriteIdList() ? request.getValidWriteIdList() : null);
         Map<String, List<ColumnStatisticsObj>> map = new HashMap<>();
         if (stats != null) {
@@ -5803,7 +5800,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public boolean update_table_column_statistics(ColumnStatistics colStats) throws TException {
       // Deprecated API, won't work for transactional tables
-      return updateTableColumnStatsInternal(colStats, -1, null, -1);
+      return updateTableColumnStatsInternal(colStats, null, -1);
     }
 
     @Override
@@ -5819,12 +5816,12 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
       ColumnStatistics colStats = req.getColStatsIterator().next();
       boolean ret = updateTableColumnStatsInternal(colStats,
-          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+          req.getValidWriteIdList(), req.getWriteId());
       return new SetPartitionsStatsResponse(ret);
     }
 
     private boolean updateTableColumnStatsInternal(ColumnStatistics colStats,
-        long txnId, String validWriteIds, long writeId)
+        String validWriteIds, long writeId)
         throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
       normalizeColStatsInput(colStats);
 
@@ -5834,7 +5831,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       boolean ret = false;
       try {
-        ret = getMS().updateTableColumnStatistics(colStats, txnId, validWriteIds, writeId);
+        ret = getMS().updateTableColumnStatistics(colStats, validWriteIds, writeId);
       } finally {
         endFunction("write_column_statistics", ret != false, null,
             colStats.getStatsDesc().getTableName());
@@ -5861,7 +5858,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     private boolean updatePartitonColStatsInternal(Table tbl, ColumnStatistics colStats,
-        long txnId, String validWriteIds, long writeId)
+        String validWriteIds, long writeId)
         throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
       normalizeColStatsInput(colStats);
 
@@ -5878,7 +5875,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
         List<String> partVals = getPartValsFromName(tbl, csd.getPartName());
         return getMS().updatePartitionColumnStatistics(
-            colStats, partVals, txnId, validWriteIds, writeId);
+            colStats, partVals, validWriteIds, writeId);
       } finally {
         endFunction("write_partition_column_statistics", ret != false, null, tableName);
       }
@@ -5887,7 +5884,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     @Override
     public boolean update_partition_column_statistics(ColumnStatistics colStats) throws TException {
       // Deprecated API.
-      return updatePartitonColStatsInternal(null, colStats, -1, null, -1);
+      return updatePartitonColStatsInternal(null, colStats, null, -1);
     }
 
 
@@ -5904,7 +5901,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       }
       ColumnStatistics colStats = req.getColStatsIterator().next();
       boolean ret = updatePartitonColStatsInternal(null, colStats,
-          req.getTxnId(), req.getValidWriteIdList(), req.getWriteId());
+          req.getValidWriteIdList(), req.getWriteId());
       return new SetPartitionsStatsResponse(ret);
     }
 
@@ -7560,8 +7557,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       try {
         aggrStats = getMS().get_aggr_stats_for(catName, dbName, tblName,
-            lowerCasePartNames, lowerCaseColNames, request.getTxnId(),
-            request.getValidWriteIdList());
+            lowerCasePartNames, lowerCaseColNames, request.getValidWriteIdList());
         return aggrStats;
       } finally {
           endFunction("get_aggr_stats_for", aggrStats == null, null, request.getTblName());
@@ -7597,12 +7593,11 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           return updateTableColumnStatsWithMerge(catName, dbName, tableName, colNames, request);
         } else {
           // This is the overwrite case, we do not care about the accuracy.
-          return updateTableColumnStatsInternal(firstColStats, request.getTxnId(),
+          return updateTableColumnStatsInternal(firstColStats,
               request.getValidWriteIdList(), request.getWriteId());
         }
       } else {
         // partition level column stats merging
-        List<Partition> partitions = new ArrayList<>();
         // note that we may have two or more duplicate partition names.
         // see autoColumnStats_2.q under TestMiniLlapLocalCliDriver
         Map<String, ColumnStatistics> newStatsMap = new HashMap<>();
@@ -7621,7 +7616,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           Table t = getTable(catName, dbName, tableName);
           for (Entry<String, ColumnStatistics> entry : newStatsMap.entrySet()) {
             // We don't short-circuit on errors here anymore. That can leave acid stats invalid.
-            ret = updatePartitonColStatsInternal(t, entry.getValue(), request.getTxnId(),
+            ret = updatePartitonColStatsInternal(t, entry.getValue(),
                 request.getValidWriteIdList(), request.getWriteId()) && ret;
           }
         }
@@ -7640,7 +7635,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         List<String> partitionNames = new ArrayList<>();
         partitionNames.addAll(newStatsMap.keySet());
         List<ColumnStatistics> csOlds = ms.getPartitionColumnStatistics(catName, dbName, tableName,
-            partitionNames, colNames, request.getTxnId(), request.getValidWriteIdList());
+            partitionNames, colNames, request.getValidWriteIdList());
         if (newStatsMap.values().size() != csOlds.size()) {
           // some of the partitions miss stats.
           LOG.debug("Some of the partitions miss stats.");
@@ -7678,7 +7673,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
           if (!csNew.getStatsObj().isEmpty()) {
             // We don't short-circuit on errors here anymore. That can leave acid stats invalid.
-            result = updatePartitonColStatsInternal(t, csNew, request.getTxnId(),
+            result = updatePartitonColStatsInternal(t, csNew,
                 request.getValidWriteIdList(), request.getWriteId()) && result;
           } else if (isInvalidTxnStats) {
             // For now because the stats state is such as it is, we will invalidate everything.
@@ -7688,7 +7683,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
             StatsSetupConst.clearColumnStatsState(part.getParameters());
             StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
             ms.alterPartition(catName, dbName, tableName, part.getValues(), part,
-                request.getTxnId(), request.getValidWriteIdList());
+                request.getValidWriteIdList());
             result = false;
           } else {
             // TODO: why doesn't the original call for non acid tables invalidate the stats?
@@ -7716,7 +7711,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
       boolean isCommitted = false, result = false;
       try {
         ColumnStatistics csOld = ms.getTableColumnStatistics(catName, dbName, tableName, colNames,
-            request.getTxnId(), request.getValidWriteIdList());
+            request.getValidWriteIdList());
         // we first use the valid stats list to prune the stats
         boolean isInvalidTxnStats = csOld != null
             && csOld.isSetIsStatsCompliant() && !csOld.isIsStatsCompliant();
@@ -7734,7 +7729,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
 
         if (!firstColStats.getStatsObj().isEmpty()) {
-          result = updateTableColumnStatsInternal(firstColStats, request.getTxnId(),
+          result = updateTableColumnStatsInternal(firstColStats,
               request.getValidWriteIdList(), request.getWriteId());
         } else if (isInvalidTxnStats) {
           // For now because the stats state is such as it is, we will invalidate everything.
@@ -7744,7 +7739,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
           t.setWriteId(request.getWriteId());
           StatsSetupConst.clearColumnStatsState(t.getParameters());
           StatsSetupConst.setBasicStatsState(t.getParameters(), StatsSetupConst.FALSE);
-          ms.alterTable(catName, dbName, tableName, t, request.getTxnId(), request.getValidWriteIdList());
+          ms.alterTable(catName, dbName, tableName, t, request.getValidWriteIdList());
         } else {
           // TODO: why doesn't the original call for non acid tables invalidate the stats?
           LOG.debug("All the column stats are not accurate to merge.");
@@ -7763,13 +7758,13 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
     private Table getTable(String catName, String dbName, String tableName)
         throws MetaException, InvalidObjectException {
-      return getTable(catName, dbName, tableName, -1, null);
+      return getTable(catName, dbName, tableName, null);
     }
 
     private Table getTable(String catName, String dbName, String tableName,
-                           long txnId, String writeIdList)
+                           String writeIdList)
         throws MetaException, InvalidObjectException {
-      Table t = getMS().getTable(catName, dbName, tableName, txnId, writeIdList);
+      Table t = getMS().getTable(catName, dbName, tableName, writeIdList);
       if (t == null) {
         throw new InvalidObjectException(TableName.getQualified(catName, dbName, tableName)
             + " table not found");


[12/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 0000000,d53e7fc..e81ea2c
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@@ -1,0 -1,1218 +1,1220 @@@
+ -- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+ --
+ -- Host: localhost    Database: test
+ -- ------------------------------------------------------
+ -- Server version	5.5.25
+ 
+ /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+ /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+ /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+ /*!40101 SET NAMES utf8 */;
+ /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+ /*!40103 SET TIME_ZONE='+00:00' */;
+ /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+ /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+ /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+ /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+ 
+ --
+ -- Table structure for table `BUCKETING_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `BUCKETING_COLS_N49` (`SD_ID`),
+   CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `CDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `CDS` (
+   `CD_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `COLUMNS_V2`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+   `CD_ID` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+   KEY `COLUMNS_V2_N49` (`CD_ID`),
+   CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `DATABASE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+   KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+   CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE TABLE `CTLGS` (
+     `CTLG_ID` BIGINT PRIMARY KEY,
+     `NAME` VARCHAR(256),
+     `DESC` VARCHAR(4000),
+     `LOCATION_URI` VARCHAR(4000) NOT NULL,
+     UNIQUE KEY `UNIQUE_CATALOG` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ --
+ -- Table structure for table `DBS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DBS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CTLG_NAME` varchar(256) NOT NULL,
+   PRIMARY KEY (`DB_ID`),
+   UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
+   CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `DB_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+   `DB_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_GRANT_ID`),
+   UNIQUE KEY `DBPRIVILEGEINDEX` (`AUTHORIZER`,`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `DB_PRIVS_N49` (`DB_ID`),
+   CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `GLOBAL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+   `USER_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`USER_GRANT_ID`),
+   UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`AUTHORIZER`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `IDXS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `IDXS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DEFERRED_REBUILD` bit(1) NOT NULL,
+   `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`),
+   UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+   KEY `IDXS_N51` (`SD_ID`),
+   KEY `IDXS_N50` (`INDEX_TBL_ID`),
+   KEY `IDXS_N49` (`ORIG_TBL_ID`),
+   CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `INDEX_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+   KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+   CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `NUCLEUS_TABLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+   `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`CLASS_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITIONS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`PART_ID`),
+   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+   KEY `PARTITIONS_N49` (`TBL_ID`),
+   KEY `PARTITIONS_N50` (`SD_ID`),
+   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_EVENTS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+   `PART_NAME_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `EVENT_TIME` bigint(20) NOT NULL,
+   `EVENT_TYPE` int(11) NOT NULL,
+   `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_NAME_ID`),
+   KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEYS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+   KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+   CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEY_VALS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+   KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+   KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+   `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+   KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+   KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+   `PART_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_GRANT_ID`),
+   KEY `PARTPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `PART_PRIVS_N49` (`PART_ID`),
+   CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLES` (
+   `ROLE_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`ROLE_ID`),
+   UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLE_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+   `ROLE_GRANT_ID` bigint(20) NOT NULL,
+   `ADD_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`ROLE_GRANT_ID`),
+   UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `ROLE_MAP_N49` (`ROLE_ID`),
+   CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SDS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `CD_ID` bigint(20) DEFAULT NULL,
+   `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `IS_COMPRESSED` bit(1) NOT NULL,
+   `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `NUM_BUCKETS` int(11) NOT NULL,
+   `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`),
+   KEY `SDS_N49` (`SERDE_ID`),
+   KEY `SDS_N50` (`CD_ID`),
+   CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SD_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+   KEY `SD_PARAMS_N49` (`SD_ID`),
+   CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SEQUENCE_TABLE`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+   `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NEXT_VAL` bigint(20) NOT NULL,
+   PRIMARY KEY (`SEQUENCE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ --
+ -- Table structure for table `SERDES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDES` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_TYPE` integer,
+   PRIMARY KEY (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SERDE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+   KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+   CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_NAMES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+   `SD_ID` bigint(20) NOT NULL,
+   `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+   `SD_ID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+   `SD_ID_OID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+   KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+   KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+   CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SORT_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ORDER` int(11) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SORT_COLS_N49` (`SD_ID`),
+   CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TABLE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+   KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+   CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_CREATION_METADATA`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TXN_LIST` TEXT DEFAULT NULL,
+   `MATERIALIZATION_TIME` bigint(20) NOT NULL,
+   PRIMARY KEY (`MV_CREATION_METADATA_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TBLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBLS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `RETENTION` int(11) NOT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `VIEW_EXPANDED_TEXT` mediumtext,
+   `VIEW_ORIGINAL_TEXT` mediumtext,
+   `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`TBL_ID`),
+   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+   KEY `TBLS_N50` (`SD_ID`),
+   KEY `TBLS_N49` (`DB_ID`),
+   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_TABLES_USED`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `TBL_ID` bigint(20) NOT NULL,
+   CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`),
+   CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+   `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+   KEY `TABLECOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+   CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+   `TBL_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_GRANT_ID`),
+   KEY `TBL_PRIVS_N49` (`TBL_ID`),
+   KEY `TABLEPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TAB_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TBL_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME) USING BTREE;
+ --
+ -- Table structure for table `PART_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PART_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TYPES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPES` (
+   `TYPES_ID` bigint(20) NOT NULL,
+   `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TYPES_ID`),
+   UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TYPE_FIELDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+   `TYPE_NAME` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+   KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+   CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+ (
+     `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+     `MASTER_KEY` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`KEY_ID`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ -- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+ (
+     `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+     `TOKEN` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`TOKEN_IDENT`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE IF NOT EXISTS `VERSION` (
+   `VER_ID` BIGINT NOT NULL,
+   `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+   `VERSION_COMMENT` VARCHAR(255),
+   PRIMARY KEY (`VER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNCS
+ --
+ CREATE TABLE IF NOT EXISTS `FUNCS` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `CREATE_TIME` INT(11) NOT NULL,
+   `DB_ID` BIGINT(20),
+   `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `FUNC_TYPE` INT(11) NOT NULL,
+   `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+   PRIMARY KEY (`FUNC_ID`),
+   UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+   KEY `FUNCS_N49` (`DB_ID`),
+   CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNC_RU
+ --
+ CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `RESOURCE_TYPE` INT(11) NOT NULL,
+   `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `INTEGER_IDX` INT(11) NOT NULL,
+   PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+   CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+ (
+     `NL_ID` BIGINT(20) NOT NULL,
+     `EVENT_ID` BIGINT(20) NOT NULL,
+     `EVENT_TIME` INT(11) NOT NULL,
+     `EVENT_TYPE` varchar(32) NOT NULL,
+     `CAT_NAME` varchar(256),
+     `DB_NAME` varchar(128),
+     `TBL_NAME` varchar(256),
+     `MESSAGE` longtext,
+     `MESSAGE_FORMAT` varchar(16),
+     PRIMARY KEY (`NL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+ (
+     `NNI_ID` BIGINT(20) NOT NULL,
+     `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+     PRIMARY KEY (`NNI_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * from (select 1 as `NNI_ID`, 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0;
+ 
+ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+ (
+   `CHILD_CD_ID` BIGINT,
+   `CHILD_INTEGER_IDX` INT(11),
+   `CHILD_TBL_ID` BIGINT,
+   `PARENT_CD_ID` BIGINT,
+   `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+   `PARENT_TBL_ID` BIGINT NOT NULL,
+   `POSITION` BIGINT NOT NULL,
+   `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+   `CONSTRAINT_TYPE` SMALLINT(6)  NOT NULL,
+   `UPDATE_RULE` SMALLINT(6),
+   `DELETE_RULE` SMALLINT(6),
+   `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+   `DEFAULT_VALUE` VARCHAR(400),
+   PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+ 
+ CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE;
+ 
+ -- -----------------------------
+ -- Metastore DB Properties table
+ -- -----------------------------
+ CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
+   `PROPERTY_KEY` varchar(255) NOT NULL,
+   `PROPERTY_VALUE` varchar(1000) NOT NULL,
+   `DESCRIPTION` varchar(1000),
+  PRIMARY KEY(`PROPERTY_KEY`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ -- ---------------------
+ -- Resource plan tables.
+ -- ---------------------
+ CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `QUERY_PARALLELISM` int(11),
+     `STATUS` varchar(20) NOT NULL,
+     `DEFAULT_POOL_ID` bigint(20),
+     PRIMARY KEY (`RP_ID`),
+     UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `PATH` varchar(767) NOT NULL,
+     `ALLOC_FRACTION` DOUBLE,
+     `QUERY_PARALLELISM` int(11),
+     `SCHEDULING_POLICY` varchar(767),
+     PRIMARY KEY (`POOL_ID`),
+     UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
+     CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
+ 
+ CREATE TABLE IF NOT EXISTS WM_TRIGGER
+ (
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `TRIGGER_EXPRESSION` varchar(1024),
+     `ACTION_EXPRESSION` varchar(1024),
+     `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0,
+     PRIMARY KEY (`TRIGGER_ID`),
+     UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`),
+     CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_MAPPING
+ (
+     `MAPPING_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `ENTITY_TYPE` varchar(128) NOT NULL,
+     `ENTITY_NAME` varchar(128) NOT NULL,
+     `POOL_ID` bigint(20),
+     `ORDERING` int,
+     PRIMARY KEY (`MAPPING_ID`),
+     UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),
+     CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+     CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT int,
+   TXN_TYPE int
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL,
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint,
+   FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint,
+   CTC_UPDATE_DELETE char(1) NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE;
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) not null,
+   HL_LOCK_TYPE char(1) not null,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT int,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+   KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varbinary(2048),
+   CQ_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varbinary(2048),
+   CC_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID bigint NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT bigint NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE `I_SCHEMA` (
+   `SCHEMA_ID` BIGINT PRIMARY KEY,
+   `SCHEMA_TYPE` INTEGER NOT NULL,
+   `NAME` VARCHAR(256),
+   `DB_ID` BIGINT,
+   `COMPATIBILITY` INTEGER NOT NULL,
+   `VALIDATION_LEVEL` INTEGER NOT NULL,
+   `CAN_EVOLVE` bit(1) NOT NULL,
+   `SCHEMA_GROUP` VARCHAR(256),
+   `DESCRIPTION` VARCHAR(4000),
+   FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+   KEY `UNIQUE_NAME` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE `SCHEMA_VERSION` (
+   `SCHEMA_VERSION_ID` bigint primary key,
+   `SCHEMA_ID` BIGINT,
+   `VERSION` INTEGER NOT NULL,
+   `CREATED_AT` BIGINT NOT NULL,
+   `CD_ID` BIGINT, 
+   `STATE` INTEGER NOT NULL,
+   `DESCRIPTION` VARCHAR(4000),
+   `SCHEMA_TEXT` mediumtext,
+   `FINGERPRINT` VARCHAR(256),
+   `SCHEMA_VERSION_NAME` VARCHAR(256),
+   `SERDE_ID` bigint, 
+   FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`),
+   FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`),
+   FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD blob
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ longtext NOT NULL,
+   WNL_PARTITION_OBJ longtext,
+   WNL_FILES longtext,
+   WNL_EVENT_TIME INT(11) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
+ 
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+ 
+ /*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+ /*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+ /*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+ /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+ /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+ /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+ /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+ 
+ -- Dump completed on 2012-08-23  0:56:31
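
For reference, once an init script like this one has been applied to a MySQL metastore
database, the recorded version can be sanity-checked against the VERSION table defined
above; a minimal sketch (the target database is assumed to already be selected):

  SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;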

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index 0000000,b3789f9..89265ad
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 
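A similarly minimal check for this upgrade path: after the script above runs, the columns
added for HIVE-19416 should be visible (MySQL syntax; table and column names as defined in
the schema file earlier in this message):

  SHOW COLUMNS FROM TBLS LIKE 'WRITE_ID';
  SHOW COLUMNS FROM PARTITIONS LIKE 'WRITE_ID';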


[19/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
index 0000000,c2bbba5..7b32c08
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java
@@@ -1,0 -1,1686 +1,1688 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.conf;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.DefaultStorageSchemaReader;
+ import org.apache.hadoop.hive.metastore.HiveAlterHandler;
+ import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockCleanerTask;
+ import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
+ import org.apache.hadoop.hive.metastore.RuntimeStatsCleanerTask;
+ import org.apache.hadoop.hive.metastore.events.EventCleanerTask;
+ import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+ import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
+ import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
+ import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
+ import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
+ import org.apache.hadoop.hive.metastore.utils.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.io.File;
+ import java.io.IOException;
+ import java.net.URI;
+ import java.net.URL;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.regex.Matcher;
+ import java.util.regex.Pattern;
+ 
+ /**
+  * A set of definitions of config values used by the Metastore.  One of the key aims of this
+  * class is to provide backwards compatibility with existing Hive configuration keys while
+  * allowing the metastore to have its own, Hive independent keys.   For this reason access to the
+  * underlying Configuration object should always be done via the static methods provided here
+  * rather than directly via {@link Configuration#get(String)} and
+  * {@link Configuration#set(String, String)}.  All the methods of this class will handle checking
+  * both the MetastoreConf key and the Hive key.  The algorithm is, on reads, to check first the
+  * MetastoreConf key, then the Hive key, then return the default if neither is set.  On writes,
+  * only the Metastore key is set.
+  *
+  * This class does not extend Configuration.  Rather it provides static methods for operating on
+  * a Configuration object.  This allows it to work on HiveConf objects, which otherwise would not
+  * be the case.
+  */
+ public class MetastoreConf {
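+   // A minimal sketch of the read precedence described in the class comment above. Only the
+   // varname and hiveName fields are taken from this class; the other names here are
+   // illustrative placeholders, not real members:
+   //
+   //   String v = conf.get(someVar.varname);           // 1. check the MetastoreConf key first
+   //   if (v == null) v = conf.get(someVar.hiveName);  // 2. fall back to the legacy Hive key
+   //   if (v == null) v = someDefaultValue;            // 3. otherwise use the default
+   //   conf.set(someVar.varname, newValue);            // writes only ever set the metastore key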
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(MetastoreConf.class);
+   private static final Pattern TIME_UNIT_SUFFIX = Pattern.compile("([0-9]+)([a-zA-Z]+)");
+ 
+   private static final Map<String, ConfVars> metaConfs = new HashMap<>();
+   private static URL hiveDefaultURL = null;
+   private static URL hiveSiteURL = null;
+   private static URL hiveMetastoreSiteURL = null;
+   private static URL metastoreSiteURL = null;
+   private static AtomicBoolean beenDumped = new AtomicBoolean();
+ 
+   private static Map<String, ConfVars> keyToVars;
+ 
+   @VisibleForTesting
+   static final String TEST_ENV_WORKAROUND = "metastore.testing.env.workaround.dont.ever.set.this.";
+ 
+   public static enum StatsUpdateMode {
+     NONE, EXISTING, ALL
+   }
+ 
+   private static class TimeValue {
+     final long val;
+     final TimeUnit unit;
+ 
+     private TimeValue(long val, TimeUnit unit) {
+       this.val = val;
+       this.unit = unit;
+     }
+ 
+     @Override
+     public String toString() {
+       switch (unit) {
+       case NANOSECONDS: return Long.toString(val) + "ns";
+       case MICROSECONDS: return Long.toString(val) + "us";
+       case MILLISECONDS: return Long.toString(val) + "ms";
+       case SECONDS: return Long.toString(val) + "s";
+       case MINUTES: return Long.toString(val) + "m";
+       case HOURS: return Long.toString(val) + "h";
+       case DAYS: return Long.toString(val) + "d";
+       }
+       throw new RuntimeException("Unknown time unit " + unit);
+     }
+   }
+ 
+   /**
+    * Metastore related options that the db is initialized against. When a conf
+    * var in this list is changed, the metastore instance for the CLI will
+    * be recreated so that the change will take effect.
+    * TODO - I suspect the vast majority of these don't need to be here.  But it requires testing
+    * before just pulling them out.
+    */
+   public static final MetastoreConf.ConfVars[] metaVars = {
+       ConfVars.WAREHOUSE,
+       ConfVars.REPLDIR,
+       ConfVars.THRIFT_URIS,
+       ConfVars.SERVER_PORT,
+       ConfVars.THRIFT_CONNECTION_RETRIES,
+       ConfVars.THRIFT_FAILURE_RETRIES,
+       ConfVars.CLIENT_CONNECT_RETRY_DELAY,
+       ConfVars.CLIENT_SOCKET_TIMEOUT,
+       ConfVars.CLIENT_SOCKET_LIFETIME,
+       ConfVars.PWD,
+       ConfVars.CONNECT_URL_HOOK,
+       ConfVars.CONNECT_URL_KEY,
+       ConfVars.SERVER_MIN_THREADS,
+       ConfVars.SERVER_MAX_THREADS,
+       ConfVars.TCP_KEEP_ALIVE,
+       ConfVars.KERBEROS_KEYTAB_FILE,
+       ConfVars.KERBEROS_PRINCIPAL,
+       ConfVars.USE_THRIFT_SASL,
+       ConfVars.TOKEN_SIGNATURE,
+       ConfVars.CACHE_PINOBJTYPES,
+       ConfVars.CONNECTION_POOLING_TYPE,
+       ConfVars.VALIDATE_TABLES,
+       ConfVars.DATANUCLEUS_INIT_COL_INFO,
+       ConfVars.VALIDATE_COLUMNS,
+       ConfVars.VALIDATE_CONSTRAINTS,
+       ConfVars.STORE_MANAGER_TYPE,
+       ConfVars.AUTO_CREATE_ALL,
+       ConfVars.DATANUCLEUS_TRANSACTION_ISOLATION,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2_TYPE,
+       ConfVars.IDENTIFIER_FACTORY,
+       ConfVars.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK,
+       ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS,
+       ConfVars.BATCH_RETRIEVE_MAX,
+       ConfVars.EVENT_LISTENERS,
+       ConfVars.TRANSACTIONAL_EVENT_LISTENERS,
+       ConfVars.EVENT_CLEAN_FREQ,
+       ConfVars.EVENT_EXPIRY_DURATION,
+       ConfVars.EVENT_MESSAGE_FACTORY,
+       ConfVars.FILTER_HOOK,
+       ConfVars.RAW_STORE_IMPL,
+       ConfVars.END_FUNCTION_LISTENERS,
+       ConfVars.PART_INHERIT_TBL_PROPS,
+       ConfVars.BATCH_RETRIEVE_OBJECTS_MAX,
+       ConfVars.INIT_HOOKS,
+       ConfVars.PRE_EVENT_LISTENERS,
+       ConfVars.HMS_HANDLER_ATTEMPTS,
+       ConfVars.HMS_HANDLER_INTERVAL,
+       ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF,
+       ConfVars.PARTITION_NAME_WHITELIST_PATTERN,
+       ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS,
+       ConfVars.USERS_IN_ADMIN_ROLE,
+       ConfVars.HIVE_TXN_MANAGER,
+       ConfVars.TXN_TIMEOUT,
+       ConfVars.TXN_MAX_OPEN_BATCH,
+       ConfVars.TXN_RETRYABLE_SQLEX_REGEX,
+       ConfVars.STATS_NDV_TUNER,
+       ConfVars.STATS_NDV_DENSITY_FUNCTION,
+       ConfVars.AGGREGATE_STATS_CACHE_ENABLED,
+       ConfVars.AGGREGATE_STATS_CACHE_SIZE,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
+       ConfVars.AGGREGATE_STATS_CACHE_FPP,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_VARIANCE,
+       ConfVars.AGGREGATE_STATS_CACHE_TTL,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_READER_WAIT,
+       ConfVars.AGGREGATE_STATS_CACHE_MAX_FULL,
+       ConfVars.AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
+       ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES,
+       ConfVars.FILE_METADATA_THREADS
+   };
+ 
+   /**
+    * User configurable Metastore vars
+    */
+   private static final MetastoreConf.ConfVars[] metaConfVars = {
+       ConfVars.TRY_DIRECT_SQL,
+       ConfVars.TRY_DIRECT_SQL_DDL,
+       ConfVars.CLIENT_SOCKET_TIMEOUT,
+       ConfVars.PARTITION_NAME_WHITELIST_PATTERN,
+       ConfVars.CAPABILITY_CHECK,
+       ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES
+   };
+ 
+   static {
+     for (ConfVars confVar : metaConfVars) {
+       metaConfs.put(confVar.varname, confVar);
+       metaConfs.put(confVar.hiveName, confVar);
+     }
+   }
+ 
+   /**
+    * Variables that we should never print the value of for security reasons.
+    */
+   private static final Set<String> unprintables = StringUtils.asSet(
+       ConfVars.PWD.varname,
+       ConfVars.PWD.hiveName,
+       ConfVars.SSL_KEYSTORE_PASSWORD.varname,
+       ConfVars.SSL_KEYSTORE_PASSWORD.hiveName,
+       ConfVars.SSL_TRUSTSTORE_PASSWORD.varname,
+       ConfVars.SSL_TRUSTSTORE_PASSWORD.hiveName
+   );
+ 
+   public static ConfVars getMetaConf(String name) {
+     return metaConfs.get(name);
+   }
+ 
+   public enum ConfVars {
+     // alpha order, PLEASE!
+     ADDED_JARS("metastore.added.jars.path", "hive.added.jars.path", "",
+         "This is an internal parameter."),
+     AGGREGATE_STATS_CACHE_CLEAN_UNTIL("metastore.aggregate.stats.cache.clean.until",
+         "hive.metastore.aggregate.stats.cache.clean.until", 0.8,
+         "The cleaner thread cleans until cache reaches this % full size."),
+     AGGREGATE_STATS_CACHE_ENABLED("metastore.aggregate.stats.cache.enabled",
+         "hive.metastore.aggregate.stats.cache.enabled", true,
+         "Whether aggregate stats caching is enabled or not."),
+     AGGREGATE_STATS_CACHE_FPP("metastore.aggregate.stats.cache.fpp",
+         "hive.metastore.aggregate.stats.cache.fpp", 0.01,
+         "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
+     AGGREGATE_STATS_CACHE_MAX_FULL("metastore.aggregate.stats.cache.max.full",
+         "hive.metastore.aggregate.stats.cache.max.full", 0.9,
+         "Maximum cache full % after which the cache cleaner thread kicks in."),
+     AGGREGATE_STATS_CACHE_MAX_PARTITIONS("metastore.aggregate.stats.cache.max.partitions",
+         "hive.metastore.aggregate.stats.cache.max.partitions", 10000,
+         "Maximum number of partitions that are aggregated per cache node."),
+     AGGREGATE_STATS_CACHE_MAX_READER_WAIT("metastore.aggregate.stats.cache.max.reader.wait",
+         "hive.metastore.aggregate.stats.cache.max.reader.wait", 1000, TimeUnit.MILLISECONDS,
+         "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
+     AGGREGATE_STATS_CACHE_MAX_VARIANCE("metastore.aggregate.stats.cache.max.variance",
+         "hive.metastore.aggregate.stats.cache.max.variance", 0.01,
+         "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
+     AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("metastore.aggregate.stats.cache.max.writer.wait",
+         "hive.metastore.aggregate.stats.cache.max.writer.wait", 5000, TimeUnit.MILLISECONDS,
+         "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
+     AGGREGATE_STATS_CACHE_SIZE("metastore.aggregate.stats.cache.size",
+         "hive.metastore.aggregate.stats.cache.size", 10000,
+         "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
+     AGGREGATE_STATS_CACHE_TTL("metastore.aggregate.stats.cache.ttl",
+         "hive.metastore.aggregate.stats.cache.ttl", 600, TimeUnit.SECONDS,
+         "Number of seconds for a cached node to be active in the cache before they become stale."),
+     ALTER_HANDLER("metastore.alter.handler", "hive.metastore.alter.impl",
+         HiveAlterHandler.class.getName(),
+         "Alter handler.  For now defaults to the Hive one.  Really need a better default option"),
+     ASYNC_LOG_ENABLED("metastore.async.log.enabled", "hive.async.log.enabled", true,
+         "Whether to enable Log4j2's asynchronous logging. Asynchronous logging can give\n" +
+             " significant performance improvement as logging will be handled in separate thread\n" +
+             " that uses LMAX disruptor queue for buffering log messages.\n" +
+             " Refer https://logging.apache.org/log4j/2.x/manual/async.html for benefits and\n" +
+             " drawbacks."),
+     AUTHORIZATION_STORAGE_AUTH_CHECKS("metastore.authorization.storage.checks",
+         "hive.metastore.authorization.storage.checks", false,
+         "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
+             "for operations like drop-partition (disallow the drop-partition if the user in\n" +
+             "question doesn't have permissions to delete the corresponding directory\n" +
+             "on the storage)."),
+     AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", "datanucleus.schema.autoCreateAll", false,
+         "Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once."
+             + "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not "
+             + "recommended for production use cases, run schematool command instead." ),
+     BATCH_RETRIEVE_MAX("metastore.batch.retrieve.max", "hive.metastore.batch.retrieve.max", 300,
+         "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
+             "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
+             "but it may also cause higher memory requirement at the client side."),
+     BATCH_RETRIEVE_OBJECTS_MAX("metastore.batch.retrieve.table.partition.max",
+         "hive.metastore.batch.retrieve.table.partition.max", 1000,
+         "Maximum number of objects that metastore internally retrieves in one batch."),
+     CACHE_PINOBJTYPES("metastore.cache.pinobjtypes", "hive.metastore.cache.pinobjtypes",
+         "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
+         "List of comma separated metastore object types that should be pinned in the cache"),
+     CACHED_RAW_STORE_IMPL("metastore.cached.rawstore.impl",
+         "hive.metastore.cached.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
+         "Name of the wrapped RawStore class"),
+     CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY("metastore.cached.rawstore.cache.update.frequency",
+         "hive.metastore.cached.rawstore.cache.update.frequency", 60, TimeUnit.SECONDS,
+         "The time after which metastore cache is updated from metastore DB."),
+     CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST("metastore.cached.rawstore.cached.object.whitelist",
+         "hive.metastore.cached.rawstore.cached.object.whitelist", ".*", "Comma separated list of regular expressions \n " +
+         "to select the tables (and its partitions, stats etc) that will be cached by CachedStore. \n" +
+         "This can be used in conjunction with hive.metastore.cached.rawstore.cached.object.blacklist. \n" +
+         "Example: .*, db1.*, db2\\.tbl.*. The last item can potentially override patterns specified before."),
+     CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST("metastore.cached.rawstore.cached.object.blacklist",
+          "hive.metastore.cached.rawstore.cached.object.blacklist", "", "Comma separated list of regular expressions \n " +
+          "to filter out the tables (and its partitions, stats etc) that will be cached by CachedStore. \n" +
+          "This can be used in conjunction with hive.metastore.cached.rawstore.cached.object.whitelist. \n" +
+          "Example: db2.*, db3\\.tbl1, db3\\..*. The last item can potentially override patterns specified before. \n" +
+          "The blacklist also overrides the whitelist."),
+     CACHED_RAW_STORE_MAX_CACHE_MEMORY("metastore.cached.rawstore.max.cache.memory",
+         "hive.metastore.cached.rawstore.max.cache.memory", "1Gb", new SizeValidator(),
+         "The maximum memory in bytes that the cached objects can use. "
+         + "Memory used is calculated based on estimated size of tables and partitions in the cache. "
+         + "Setting it to a negative value disables memory estimation."),
+     CAPABILITY_CHECK("metastore.client.capability.check",
+         "hive.metastore.client.capability.check", true,
+         "Whether to check client capabilities for potentially breaking API usage."),
+     CATALOG_DEFAULT("metastore.catalog.default", "metastore.catalog.default", "hive",
+         "The default catalog to use when a catalog is not specified.  Default is 'hive' (the " +
+             "default catalog)."),
+     CATALOGS_TO_CACHE("metastore.cached.rawstore.catalogs", "metastore.cached.rawstore.catalogs",
+         "hive", "Comma separated list of catalogs to cache in the CachedStore. Default is 'hive' " +
+         "(the default catalog).  Empty string means all catalogs will be cached."),
+     CLIENT_CONNECT_RETRY_DELAY("metastore.client.connect.retry.delay",
+         "hive.metastore.client.connect.retry.delay", 1, TimeUnit.SECONDS,
+         "Number of seconds for the client to wait between consecutive connection attempts"),
+     CLIENT_KERBEROS_PRINCIPAL("metastore.client.kerberos.principal",
+         "hive.metastore.client.kerberos.principal",
+         "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM".
+         "The Kerberos principal associated with the HA cluster of hcat_servers."),
+     CLIENT_SOCKET_LIFETIME("metastore.client.socket.lifetime",
+         "hive.metastore.client.socket.lifetime", 0, TimeUnit.SECONDS,
+         "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
+             "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
+             "has an infinite lifetime."),
+     CLIENT_SOCKET_TIMEOUT("metastore.client.socket.timeout", "hive.metastore.client.socket.timeout", 600,
+             TimeUnit.SECONDS, "MetaStore Client socket timeout in seconds"),
+     COMPACTOR_HISTORY_REAPER_INTERVAL("metastore.compactor.history.reaper.interval",
+         "hive.compactor.history.reaper.interval", 2, TimeUnit.MINUTES,
+         "Determines how often compaction history reaper runs"),
+     COMPACTOR_HISTORY_RETENTION_ATTEMPTED("metastore.compactor.history.retention.attempted",
+         "hive.compactor.history.retention.attempted", 2,
+         new RangeValidator(0, 100), "Determines how many attempted compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_HISTORY_RETENTION_FAILED("metastore.compactor.history.retention.failed",
+         "hive.compactor.history.retention.failed", 3,
+         new RangeValidator(0, 100), "Determines how many failed compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_HISTORY_RETENTION_SUCCEEDED("metastore.compactor.history.retention.succeeded",
+         "hive.compactor.history.retention.succeeded", 3,
+         new RangeValidator(0, 100), "Determines how many successful compaction records will be " +
+         "retained in compaction history for a given table/partition."),
+     COMPACTOR_INITIATOR_FAILED_THRESHOLD("metastore.compactor.initiator.failed.compacts.threshold",
+         "hive.compactor.initiator.failed.compacts.threshold", 2,
+         new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " +
+         "after which automatic compactions will not be scheduled any more.  Note that this must be less " +
+         "than hive.compactor.history.retention.failed."),
+     COMPACTOR_INITIATOR_ON("metastore.compactor.initiator.on", "hive.compactor.initiator.on", false,
+         "Whether to run the initiator and cleaner threads on this metastore instance or not.\n" +
+             "Set this to true on one instance of the Thrift metastore service as part of turning\n" +
+             "on Hive transactions. For a complete list of parameters required for turning on\n" +
+             "transactions, see hive.txn.manager."),
+     COMPACTOR_WORKER_THREADS("metastore.compactor.worker.threads",
+         "hive.compactor.worker.threads", 0,
+         "How many compactor worker threads to run on this metastore instance. Set this to a\n" +
+             "positive number on one or more instances of the Thrift metastore service as part of\n" +
+             "turning on Hive transactions. For a complete list of parameters required for turning\n" +
+             "on transactions, see hive.txn.manager.\n" +
+             "Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" +
+             "themselves. Increasing the number of worker threads will decrease the time it takes\n" +
+             "tables or partitions to be compacted once they are determined to need compaction.\n" +
+             "It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" +
+             "will be running in the background."),
+     CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName",
+         "javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
+         "Driver class name for a JDBC metastore"),
+     CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize",
+         "datanucleus.connectionPool.maxPoolSize", 10,
+         "Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" +
+             "2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" +
+             "recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" +
+             "configured with embedded metastore. To get optimal performance, set config to meet the following condition\n"+
+             "(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" +
+             "(2 * physical_core_count + hard_disk_count)."),
+     CONNECT_URL_HOOK("metastore.ds.connection.url.hook",
+         "hive.metastore.ds.connection.url.hook", "",
+         "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
+     CONNECT_URL_KEY("javax.jdo.option.ConnectionURL",
+         "javax.jdo.option.ConnectionURL",
+         "jdbc:derby:;databaseName=metastore_db;create=true",
+         "JDBC connect string for a JDBC metastore.\n" +
+             "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
+             "For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
+     CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType",
+         "datanucleus.connectionPoolingType", "HikariCP", new StringSetValidator("BONECP", "DBCP",
+         "HikariCP", "NONE"),
+         "Specify connection pool library for datanucleus"),
+     CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName",
+         "javax.jdo.option.ConnectionUserName", "APP",
+         "Username to use against metastore database"),
+     CREATE_TABLES_AS_ACID("metastore.create.as.acid", "hive.create.as.acid", false,
+         "Whether the eligible tables should be created as full ACID by default. Does \n" +
+             "not apply to external tables, the ones using storage handlers, etc."),
+     COUNT_OPEN_TXNS_INTERVAL("metastore.count.open.txns.interval", "hive.count.open.txns.interval",
+         1, TimeUnit.SECONDS, "Time in seconds between checks to count open transactions."),
+     DATANUCLEUS_AUTOSTART("datanucleus.autoStartMechanismMode",
+         "datanucleus.autoStartMechanismMode", "ignored", new StringSetValidator("ignored"),
+         "Autostart mechanism for datanucleus.  Currently ignored is the only option supported."),
+     DATANUCLEUS_CACHE_LEVEL2("datanucleus.cache.level2", "datanucleus.cache.level2", false,
+         "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
+     DATANUCLEUS_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type",
+         "datanucleus.cache.level2.type", "none", ""),
+     DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo",
+         "datanucleus.rdbms.initializeColumnInfo", "NONE",
+         "initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."),
+     DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck",
+         "datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
+         "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
+     DATANUCLEUS_TRANSACTION_ISOLATION("datanucleus.transactionIsolation",
+         "datanucleus.transactionIsolation", "read-committed",
+         "Default transaction isolation level for identity generation."),
+     DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy",
+         "datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
+     DBACCESS_SSL_PROPS("metastore.dbaccess.ssl.properties", "hive.metastore.dbaccess.ssl.properties", "",
+         "Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
+             "enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
+     DEFAULTPARTITIONNAME("metastore.default.partition.name",
+         "hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
+         "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
+             "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
+             "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
+     DELEGATION_KEY_UPDATE_INTERVAL("metastore.cluster.delegation.key.update-interval",
+         "hive.cluster.delegation.key.update-interval", 1, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_GC_INTERVAL("metastore.cluster.delegation.token.gc-interval",
+         "hive.cluster.delegation.token.gc-interval", 1, TimeUnit.HOURS, ""),
+     DELEGATION_TOKEN_MAX_LIFETIME("metastore.cluster.delegation.token.max-lifetime",
+         "hive.cluster.delegation.token.max-lifetime", 7, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_RENEW_INTERVAL("metastore.cluster.delegation.token.renew-interval",
+       "hive.cluster.delegation.token.renew-interval", 1, TimeUnit.DAYS, ""),
+     DELEGATION_TOKEN_STORE_CLS("metastore.cluster.delegation.token.store.class",
+         "hive.cluster.delegation.token.store.class", MetastoreDelegationTokenManager.class.getName(),
+         "Class to store delegation tokens"),
+     DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit",
+         "javax.jdo.option.DetachAllOnCommit", true,
+         "Detaches all objects from session so that they can be used after transaction is committed"),
+     DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("metastore.direct.sql.max.elements.in.clause",
+         "hive.direct.sql.max.elements.in.clause", 1000,
+         "The maximum number of values in a IN clause. Once exceeded, it will be broken into\n" +
+             " multiple OR separated IN clauses."),
+     DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("metastore.direct.sql.max.elements.values.clause",
+         "hive.direct.sql.max.elements.values.clause",
+         1000, "The maximum number of values in a VALUES clause for INSERT statement."),
+     DIRECT_SQL_MAX_QUERY_LENGTH("metastore.direct.sql.max.query.length",
+         "hive.direct.sql.max.query.length", 100, "The maximum\n" +
+         " size of a query string (in KB)."),
+     DIRECT_SQL_PARTITION_BATCH_SIZE("metastore.direct.sql.batch.size",
+         "hive.metastore.direct.sql.batch.size", 0,
+         "Batch size for partition and other object retrieval from the underlying DB in direct\n" +
+             "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
+             "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
+             "may impede performance. -1 means no batching, 0 means automatic batching."),
+     DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES("metastore.disallow.incompatible.col.type.changes",
+         "hive.metastore.disallow.incompatible.col.type.changes", true,
+         "If true, ALTER TABLE operations which change the type of a\n" +
+             "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" +
+             "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
+             "datatypes can be converted from string to any type. The map is also serialized as\n" +
+             "a string, which can be read as a string as well. However, with any binary\n" +
+             "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
+             "when subsequently trying to access old partitions.\n" +
+             "\n" +
+             "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
+             "not blocked.\n" +
+             "\n" +
+             "See HIVE-4409 for more details."),
+     DUMP_CONFIG_ON_CREATION("metastore.dump.config.on.creation", "metastore.dump.config.on.creation", true,
+         "If true, a printout of the config file (minus sensitive values) will be dumped to the " +
+             "log whenever newMetastoreConf() is called.  Can produce a lot of logs"),
+     END_FUNCTION_LISTENERS("metastore.end.function.listeners",
+         "hive.metastore.end.function.listeners", "",
+         "List of comma separated listeners for the end of metastore functions."),
+     EVENT_CLEAN_FREQ("metastore.event.clean.freq", "hive.metastore.event.clean.freq", 0,
+         TimeUnit.SECONDS, "Frequency at which timer task runs to purge expired events in metastore."),
+     EVENT_EXPIRY_DURATION("metastore.event.expiry.duration", "hive.metastore.event.expiry.duration",
+         0, TimeUnit.SECONDS, "Duration after which events expire from events table"),
+     EVENT_LISTENERS("metastore.event.listeners", "hive.metastore.event.listeners", "",
+         "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" +
+             " interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " +
+             "Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."),
+     EVENT_MESSAGE_FACTORY("metastore.event.message.factory",
+         "hive.metastore.event.message.factory",
+         "org.apache.hadoop.hive.metastore.messaging.json.JSONMessageFactory",
+         "Factory class for making encoding and decoding messages in the events generated."),
+     EVENT_DB_LISTENER_TTL("metastore.event.db.listener.timetolive",
+         "hive.metastore.event.db.listener.timetolive", 86400, TimeUnit.SECONDS,
+         "time after which events will be removed from the database listener queue"),
+     EVENT_DB_NOTIFICATION_API_AUTH("metastore.metastore.event.db.notification.api.auth",
+         "hive.metastore.event.db.notification.api.auth", true,
+         "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" +
+             "If set to true, then only the superusers in proxy settings have the permission"),
+     EXECUTE_SET_UGI("metastore.execute.setugi", "hive.metastore.execute.setugi", true,
+         "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
+             "the client's reported user and group permissions. Note that this property must be set on \n" +
+             "both the client and server sides. Further note that its best effort. \n" +
+             "If client sets its to true and server sets it to false, client setting will be ignored."),
+     EXPRESSION_PROXY_CLASS("metastore.expression.proxy", "hive.metastore.expression.proxy",
+         "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore",
+         "Class to use to process expressions in partition pruning."),
+     FILE_METADATA_THREADS("metastore.file.metadata.threads",
+         "hive.metastore.hbase.file.metadata.threads", 1,
+         "Number of threads to use to read file metadata in background to cache it."),
+     FILTER_HOOK("metastore.filter.hook", "hive.metastore.filter.hook",
+         org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl.class.getName(),
+         "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager"
+             + "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
+     FS_HANDLER_CLS("metastore.fs.handler.class", "hive.metastore.fs.handler.class",
+         "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
+     FS_HANDLER_THREADS_COUNT("metastore.fshandler.threads", "hive.metastore.fshandler.threads", 15,
+         "Number of threads to be allocated for metastore handler for fs operations."),
+     HMS_HANDLER_ATTEMPTS("metastore.hmshandler.retry.attempts", "hive.hmshandler.retry.attempts", 10,
+         "The number of times to retry a HMSHandler call if there were a connection error."),
+     HMS_HANDLER_FORCE_RELOAD_CONF("metastore.hmshandler.force.reload.conf",
+         "hive.hmshandler.force.reload.conf", false,
+         "Whether to force reloading of the HMSHandler configuration (including\n" +
+             "the connection URL, before the next metastore query that accesses the\n" +
+             "datastore. Once reloaded, this value is reset to false. Used for\n" +
+             "testing only."),
+     HMS_HANDLER_INTERVAL("metastore.hmshandler.retry.interval", "hive.hmshandler.retry.interval",
+         2000, TimeUnit.MILLISECONDS, "The time between HMSHandler retry attempts on failure."),
+     IDENTIFIER_FACTORY("datanucleus.identifierFactory",
+         "datanucleus.identifierFactory", "datanucleus1",
+         "Name of the identifier factory to use when generating table/column names etc. \n" +
+             "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
+     INIT_HOOKS("metastore.init.hooks", "hive.metastore.init.hooks", "",
+         "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
+             "An init hook is specified as the name of Java class which extends org.apache.riven.MetaStoreInitListener."),
+     INIT_METADATA_COUNT_ENABLED("metastore.initial.metadata.count.enabled",
+         "hive.metastore.initial.metadata.count.enabled", true,
+         "Enable a metadata count at metastore startup for metrics."),
+     INTEGER_JDO_PUSHDOWN("metastore.integral.jdo.pushdown",
+         "hive.metastore.integral.jdo.pushdown", false,
+         "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
+             "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
+             "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
+             "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
+             "is also irrelevant."),
+     KERBEROS_KEYTAB_FILE("metastore.kerberos.keytab.file",
+         "hive.metastore.kerberos.keytab.file", "",
+         "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
+     KERBEROS_PRINCIPAL("metastore.kerberos.principal", "hive.metastore.kerberos.principal",
+         "hive-metastore/_HOST@EXAMPLE.COM",
+         "The service principal for the metastore Thrift server. \n" +
+             "The special string _HOST will be replaced automatically with the correct host name."),
+     LIMIT_PARTITION_REQUEST("metastore.limit.partition.request",
+         "hive.metastore.limit.partition.request", -1,
+         "This limits the number of partitions (whole partition objects) that can be requested " +
+         "from the metastore for a give table. MetaStore API methods using this are: \n" +
+                 "get_partitions, \n" +
+                 "get_partitions_with_auth, \n" +
+                 "get_partitions_by_filter, \n" +
+                 "get_partitions_by_expr.\n" +
+             "The default value \"-1\" means no limit."),
+     LOG4J_FILE("metastore.log4j.file", "hive.log4j.file", "",
+         "Hive log4j configuration file.\n" +
+             "If the property is not set, then logging will be initialized using metastore-log4j2.properties found on the classpath.\n" +
+             "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" +
+             "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."),
+     MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
+         "javax.jdo.PersistenceManagerFactoryClass",
+         "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
+         "class implementing the jdo persistence"),
+     MATERIALIZATIONS_INVALIDATION_CACHE_IMPL("metastore.materializations.invalidation.impl",
+         "hive.metastore.materializations.invalidation.impl", "DEFAULT",
+         new StringSetValidator("DEFAULT", "DISABLE"),
+         "The implementation that we should use for the materializations invalidation cache. \n" +
+             "  DEFAULT: Default implementation for invalidation cache\n" +
+             "  DISABLE: Disable invalidation cache (debugging purposes)"),
+     MATERIALIZATIONS_INVALIDATION_CACHE_CLEAN_FREQUENCY("metastore.materializations.invalidation.clean.frequency",
+          "hive.metastore.materializations.invalidation.clean.frequency",
+          3600, TimeUnit.SECONDS, "Frequency at which timer task runs to remove unnecessary transaction entries from" +
+           "materializations invalidation cache."),
+     MATERIALIZATIONS_INVALIDATION_CACHE_EXPIRY_DURATION("metastore.materializations.invalidation.max.duration",
+          "hive.metastore.materializations.invalidation.max.duration",
+          86400, TimeUnit.SECONDS, "Maximum duration for query producing a materialization. After this time, transaction" +
+          "entries that are not relevant for materializations can be removed from invalidation cache."),
+ 
+     RUNTIME_STATS_CLEAN_FREQUENCY("runtime.stats.clean.frequency", "hive.metastore.runtime.stats.clean.frequency", 3600,
+         TimeUnit.SECONDS, "Frequency at which timer task runs to remove outdated runtime stat entries."),
+     RUNTIME_STATS_MAX_AGE("runtime.stats.max.age", "hive.metastore.runtime.stats.max.age", 86400 * 3, TimeUnit.SECONDS,
+         "Stat entries which are older than this are removed."),
+ 
+     // Parameters for exporting metadata on table drop (requires the use of the)
+     // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
+     METADATA_EXPORT_LOCATION("metastore.metadata.export.location", "hive.metadata.export.location",
+         "",
+         "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+             "it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
+             "metadata being exported to the current user's home directory on HDFS."),
+     MOVE_EXPORTED_METADATA_TO_TRASH("metastore.metadata.move.exported.metadata.to.trash",
+         "hive.metadata.move.exported.metadata.to.trash", true,
+         "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+             "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
+             "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
+     METRICS_ENABLED("metastore.metrics.enabled", "hive.metastore.metrics.enabled", false,
+         "Enable metrics on the metastore."),
+     METRICS_HADOOP2_COMPONENT_NAME("metastore.metrics.hadoop2.component", "hive.service.metrics.hadoop2.component", "hivemetastore",
+                     "Component name to provide to Hadoop2 Metrics system."),
+     METRICS_JSON_FILE_INTERVAL("metastore.metrics.file.frequency",
+         "hive.service.metrics.file.frequency", 1, TimeUnit.MINUTES,
+         "For json metric reporter, the frequency of updating JSON metrics file."),
+     METRICS_JSON_FILE_LOCATION("metastore.metrics.file.location",
+         "hive.service.metrics.file.location", "/tmp/report.json",
+         "For metric class json metric reporter, the location of local JSON metrics file.  " +
+             "This file will get overwritten at every interval."),
+     METRICS_REPORTERS("metastore.metrics.reporters", "metastore.metrics.reporters", "json,jmx",
+         new StringSetValidator("json", "jmx", "console", "hadoop"),
+         "A comma separated list of metrics reporters to start"),
+     MULTITHREADED("javax.jdo.option.Multithreaded", "javax.jdo.option.Multithreaded", true,
+         "Set this to true if multiple threads access metastore through JDO concurrently."),
+     MAX_OPEN_TXNS("metastore.max.open.txns", "hive.max.open.txns", 100000,
+         "Maximum number of open transactions. If \n" +
+         "current open transactions reach this limit, future open transaction requests will be \n" +
+         "rejected, until this number goes below the limit."),
+     NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead",
+         "javax.jdo.option.NonTransactionalRead", true,
+         "Reads outside of transactions"),
+     NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("metastore.notification.sequence.lock.max.retries",
+         "hive.notification.sequence.lock.max.retries", 5,
+         "Number of retries required to acquire a lock when getting the next notification sequential ID for entries "
+             + "in the NOTIFICATION_LOG table."),
+     NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL(
+         "metastore.notification.sequence.lock.retry.sleep.interval",
+         "hive.notification.sequence.lock.retry.sleep.interval", 500, TimeUnit.MILLISECONDS,
+         "Sleep interval between retries to acquire a notification lock as described part of property "
+             + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()),
+     ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("metastore.orm.retrieveMapNullsAsEmptyStrings",
+         "hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false,
+         "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
+             "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
+             "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
+             "pruning is the correct behaviour"),
+     PARTITION_NAME_WHITELIST_PATTERN("metastore.partition.name.whitelist.pattern",
+         "hive.metastore.partition.name.whitelist.pattern", "",
+         "Partition names will be checked against this regex pattern and rejected if not matched."),
+     PART_INHERIT_TBL_PROPS("metastore.partition.inherit.table.properties",
+         "hive.metastore.partition.inherit.table.properties", "",
+         "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
+             "* implies all the keys will get inherited."),
+     PRE_EVENT_LISTENERS("metastore.pre.event.listeners", "hive.metastore.pre.event.listeners", "",
+         "List of comma separated listeners for metastore events."),
+     PWD("javax.jdo.option.ConnectionPassword", "javax.jdo.option.ConnectionPassword", "mine",
+         "password to use against metastore database"),
+     RAW_STORE_IMPL("metastore.rawstore.impl", "hive.metastore.rawstore.impl",
+         "org.apache.hadoop.hive.metastore.ObjectStore",
+         "Name of the class that implements org.apache.riven.rawstore interface. \n" +
+             "This class is used to store and retrieval of raw metadata objects such as table, database"),
+     REPLCMDIR("metastore.repl.cmrootdir", "hive.repl.cmrootdir", "/user/hive/cmroot/",
+         "Root dir for ChangeManager, used for deleted files."),
+     REPLCMRETIAN("metastore.repl.cm.retain", "hive.repl.cm.retain",  24, TimeUnit.HOURS,
+         "Time to retain removed files in cmrootdir."),
+     REPLCMINTERVAL("metastore.repl.cm.interval", "hive.repl.cm.interval", 3600, TimeUnit.SECONDS,
+         "Inteval for cmroot cleanup thread."),
+     REPLCMENABLED("metastore.repl.cm.enabled", "hive.repl.cm.enabled", false,
+         "Turn on ChangeManager, so delete files will go to cmrootdir."),
+     REPLDIR("metastore.repl.rootdir", "hive.repl.rootdir", "/user/hive/repl/",
+         "HDFS root dir for all replication dumps."),
+     REPL_COPYFILE_MAXNUMFILES("metastore.repl.copyfile.maxnumfiles",
+         "hive.exec.copyfile.maxnumfiles", 1L,
+         "Maximum number of files Hive uses to do sequential HDFS copies between directories." +
+             "Distributed copies (distcp) will be used instead for larger numbers of files so that copies can be done faster."),
+     REPL_COPYFILE_MAXSIZE("metastore.repl.copyfile.maxsize",
+         "hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/,
+         "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories." +
+             "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."),
+     SCHEMA_INFO_CLASS("metastore.schema.info.class", "hive.metastore.schema.info.class",
+         "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo",
+         "Fully qualified class name for the metastore schema information class \n"
+             + "which is used by schematool to fetch the schema information.\n"
+             + " This class should implement the IMetaStoreSchemaInfo interface"),
+     SCHEMA_VERIFICATION("metastore.schema.verification", "hive.metastore.schema.verification", true,
+         "Enforce metastore schema version consistency.\n" +
+         "True: Verify that version information stored in is compatible with one from Hive jars.  Also disable automatic\n" +
+         "      schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
+         "      proper metastore schema migration. (Default)\n" +
+         "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
+     SCHEMA_VERIFICATION_RECORD_VERSION("metastore.schema.verification.record.version",
+         "hive.metastore.schema.verification.record.version", false,
+         "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
+             " enabled the MS will be unusable."),
+     SERDES_USING_METASTORE_FOR_SCHEMA("metastore.serdes.using.metastore.for.schema",
+         "hive.serdes.using.metastore.for.schema",
+         "org.apache.hadoop.hive.ql.io.orc.OrcSerde," +
+             "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," +
+             "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," +
+             "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," +
+             "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," +
+             "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," +
+             "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," +
+             "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe",
+         "SerDes retrieving schema from metastore. This is an internal parameter."),
+     SERVER_MAX_MESSAGE_SIZE("metastore.server.max.message.size",
+         "hive.metastore.server.max.message.size", 100*1024*1024L,
+         "Maximum message size in bytes a HMS will accept."),
+     SERVER_MAX_THREADS("metastore.server.max.threads",
+         "hive.metastore.server.max.threads", 1000,
+         "Maximum number of worker threads in the Thrift server's pool."),
+     SERVER_MIN_THREADS("metastore.server.min.threads", "hive.metastore.server.min.threads", 200,
+         "Minimum number of worker threads in the Thrift server's pool."),
+     SERVER_PORT("metastore.thrift.port", "hive.metastore.port", 9083,
+         "Hive metastore listener port"),
+     SSL_KEYSTORE_PASSWORD("metastore.keystore.password", "hive.metastore.keystore.password", "",
+         "Metastore SSL certificate keystore password."),
+     SSL_KEYSTORE_PATH("metastore.keystore.path", "hive.metastore.keystore.path", "",
+         "Metastore SSL certificate keystore location."),
+     SSL_PROTOCOL_BLACKLIST("metastore.ssl.protocol.blacklist", "hive.ssl.protocol.blacklist",
+         "SSLv2,SSLv3", "SSL Versions to disable for all Hive Servers"),
+     SSL_TRUSTSTORE_PATH("metastore.truststore.path", "hive.metastore.truststore.path", "",
+         "Metastore SSL certificate truststore location."),
+     SSL_TRUSTSTORE_PASSWORD("metastore.truststore.password", "hive.metastore.truststore.password", "",
+         "Metastore SSL certificate truststore password."),
+     STATS_AUTO_GATHER("metastore.stats.autogather", "hive.stats.autogather", true,
+         "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."),
+     STATS_FETCH_BITVECTOR("metastore.stats.fetch.bitvector", "hive.stats.fetch.bitvector", false,
+         "Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"),
+     STATS_NDV_TUNER("metastore.stats.ndv.tuner", "hive.metastore.stats.ndv.tuner", 0.0,
+         "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" +
+             "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" +
+             "Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"),
+     STATS_NDV_DENSITY_FUNCTION("metastore.stats.ndv.densityfunction",
+         "hive.metastore.stats.ndv.densityfunction", false,
+         "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"),
+     STATS_DEFAULT_AGGREGATOR("metastore.stats.default.aggregator", "hive.stats.default.aggregator",
+         "",
+         "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
+     STATS_DEFAULT_PUBLISHER("metastore.stats.default.publisher", "hive.stats.default.publisher", "",
+         "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
+     STATS_AUTO_UPDATE("metastore.stats.auto.analyze", "hive.metastore.stats.auto.analyze", "none",
+         new EnumValidator(StatsUpdateMode.values()),
+         "Whether to update stats in the background; none - no, all - for all tables, existing - only existing, out of date, stats."),
+     STATS_AUTO_UPDATE_NOOP_WAIT("metastore.stats.auto.analyze.noop.wait",
+         "hive.metastore.stats.auto.analyze.noop.wait", 5L, TimeUnit.MINUTES,
+         new TimeValidator(TimeUnit.MINUTES),
+         "How long to sleep if there were no stats needing update during an update iteration.\n" +
+         "This is a setting to throttle table/partition checks when nothing is being changed; not\n" +
+         "the analyze queries themselves."),
+     STATS_AUTO_UPDATE_WORKER_COUNT("metastore.stats.auto.analyze.worker.count",
+         "hive.metastore.stats.auto.analyze.worker.count", 1,
+         "Number of parallel analyze commands to run for background stats update."),
+     STORAGE_SCHEMA_READER_IMPL("metastore.storage.schema.reader.impl", "metastore.storage.schema.reader.impl",
+         DefaultStorageSchemaReader.class.getName(),
+         "The class to use to read schemas from storage.  It must implement " +
+         "org.apache.hadoop.hive.metastore.StorageSchemaReader"),
+     STORE_MANAGER_TYPE("datanucleus.storeManagerType", "datanucleus.storeManagerType", "rdbms", "metadata store type"),
+     STRICT_MANAGED_TABLES("metastore.strict.managed.tables", "hive.strict.managed.tables", false,
+             "Whether strict managed tables mode is enabled. With this mode enabled, " +
+             "only transactional tables (both full and insert-only) are allowed to be created as managed tables"),
+     SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("metastore.support.special.characters.tablename",
+         "hive.support.special.characters.tablename", true,
+         "This flag should be set to true to enable support for special characters in table names.\n"
+             + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n"
+             + "The only supported special character right now is '/'. This flag applies only to quoted table names.\n"
+             + "The default value is true."),
+     TASK_THREADS_ALWAYS("metastore.task.threads.always", "metastore.task.threads.always",
+         EventCleanerTask.class.getName() + "," + RuntimeStatsCleanerTask.class.getName() + "," +
+         "org.apache.hadoop.hive.metastore.repl.DumpDirCleanerTask" + "," +
+             "org.apache.hadoop.hive.metastore.HiveProtoEventsCleanerTask",
+         "Comma separated list of tasks that will be started in separate threads.  These will " +
+             "always be started, regardless of whether the metastore is running in embedded mode " +
+             "or in server mode.  They must implement " + MetastoreTaskThread.class.getName()),
+     TASK_THREADS_REMOTE_ONLY("metastore.task.threads.remote", "metastore.task.threads.remote",
+         AcidHouseKeeperService.class.getName() + "," +
+             AcidOpenTxnsCounterService.class.getName() + "," +
+             AcidCompactionHistoryService.class.getName() + "," +
+             AcidWriteSetService.class.getName() + "," +
+             MaterializationsRebuildLockCleanerTask.class.getName(),
+         "Command separated list of tasks that will be started in separate threads.  These will be" +
+             " started only when the metastore is running as a separate service.  They must " +
+             "implement " + MetastoreTaskThread.class.getName()),
+     TCP_KEEP_ALIVE("metastore.server.tcp.keepalive",
+         "hive.metastore.server.tcp.keepalive", true,
+         "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
+     THREAD_POOL_SIZE("metastore.thread.pool.size", "no.such", 10,
+         "Number of threads in the thread pool.  These will be used to execute all background " +
+             "processes."),
+     THRIFT_CONNECTION_RETRIES("metastore.connect.retries", "hive.metastore.connect.retries", 3,
+         "Number of retries while opening a connection to metastore"),
+     THRIFT_FAILURE_RETRIES("metastore.failure.retries", "hive.metastore.failure.retries", 1,
+         "Number of retries upon failure of Thrift metastore calls"),
+     THRIFT_URIS("metastore.thrift.uris", "hive.metastore.uris", "",
+         "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
+     THRIFT_URI_SELECTION("metastore.thrift.uri.selection", "hive.metastore.uri.selection", "RANDOM",
+         new StringSetValidator("RANDOM", "SEQUENTIAL"),
+         "Determines the selection mechanism used by metastore client to connect to remote " +
+         "metastore.  SEQUENTIAL implies that the first valid metastore from the URIs specified " +
+         "as part of hive.metastore.uris will be picked.  RANDOM implies that the metastore " +
+         "will be picked randomly"),
+     TIMEDOUT_TXN_REAPER_START("metastore.timedout.txn.reaper.start",
+         "hive.timedout.txn.reaper.start", 100, TimeUnit.SECONDS,
+         "Time delay of 1st reaper run after metastore start"),
+     TIMEDOUT_TXN_REAPER_INTERVAL("metastore.timedout.txn.reaper.interval",
+         "hive.timedout.txn.reaper.interval", 180, TimeUnit.SECONDS,
+         "Time interval describing how often the reaper runs"),
+     TOKEN_SIGNATURE("metastore.token.signature", "hive.metastore.token.signature", "",
+         "The delegation token service name to match when selecting a token from the current user's tokens."),
+     TRANSACTIONAL_EVENT_LISTENERS("metastore.transactional.event.listeners",
+         "hive.metastore.transactional.event.listeners", "",
+         "A comma separated list of Java classes that implement the org.apache.riven.MetaStoreEventListener" +
+             " interface. Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."),
+     TRY_DIRECT_SQL("metastore.try.direct.sql", "hive.metastore.try.direct.sql", true,
+         "Whether the metastore should try to use direct SQL queries instead of the\n" +
+             "DataNucleus for certain read paths. This can improve metastore performance when\n" +
+             "fetching many partitions or column statistics by orders of magnitude; however, it\n" +
+             "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
+             "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
+             "work for all queries on your datastore. If all SQL queries fail (for example, your\n" +
+             "metastore is backed by MongoDB), you might want to disable this to save the\n" +
+             "try-and-fall-back cost."),
+     TRY_DIRECT_SQL_DDL("metastore.try.direct.sql.ddl", "hive.metastore.try.direct.sql.ddl", true,
+         "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
+             "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
+             "select query has incorrect syntax or something similar inside a transaction, the\n" +
+             "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
+             "should disable the usage of direct SQL inside transactions if that happens in your case."),
+     TXN_MAX_OPEN_BATCH("metastore.txn.max.open.batch", "hive.txn.max.open.batch", 1000,
+         "Maximum number of transactions that can be fetched in one call to open_txns().\n" +
+             "This controls how many transactions streaming agents such as Flume or Storm open\n" +
+             "simultaneously. The streaming agent then writes that number of entries into a single\n" +
+             "file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" +
+             "of delta files created by streaming agents. But it also increases the number of open\n" +
+             "transactions that Hive has to track at any given time, which may negatively affect\n" +
+             "read performance."),
+     TXN_RETRYABLE_SQLEX_REGEX("metastore.txn.retryable.sqlex.regex",
+         "hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" +
+         "of regular expression patterns for SQL state, error code, and error message of\n" +
+         "retryable SQLExceptions, that's suitable for the metastore DB.\n" +
+         "For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" +
+         "The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" +
+         "ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""),
+     TXN_STORE_IMPL("metastore.txn.store.impl", "hive.metastore.txn.store.impl",
+         "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
+         "Name of class that implements org.apache.riven.txn.TxnStore.  This " +
+             "class is used to store and retrieve transactions and locks"),
+     TXN_TIMEOUT("metastore.txn.timeout", "hive.txn.timeout", 300, TimeUnit.SECONDS,
+         "time after which transactions are declared aborted if the client has not sent a heartbeat."),
+     URI_RESOLVER("metastore.uri.resolver", "hive.metastore.uri.resolver", "",
+             "If set, fully qualified class name of resolver for hive metastore uri's"),
+     USERS_IN_ADMIN_ROLE("metastore.users.in.admin.role", "hive.users.in.admin.role", "", false,
+         "Comma separated list of users who are in admin role for bootstrapping.\n" +
+             "More users can be added in ADMIN role later."),
+     USE_SSL("metastore.use.SSL", "hive.metastore.use.SSL", false,
+         "Set this to true for using SSL encryption in HMS server."),
+     USE_THRIFT_SASL("metastore.sasl.enabled", "hive.metastore.sasl.enabled", false,
+         "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
+     USE_THRIFT_FRAMED_TRANSPORT("metastore.thrift.framed.transport.enabled",
+         "hive.metastore.thrift.framed.transport.enabled", false,
+         "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
+     USE_THRIFT_COMPACT_PROTOCOL("metastore.thrift.compact.protocol.enabled",
+         "hive.metastore.thrift.compact.protocol.enabled", false,
+         "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
+             "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
+     VALIDATE_COLUMNS("datanucleus.schema.validateColumns", "datanucleus.schema.validateColumns", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints",
+         "datanucleus.schema.validateConstraints", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     VALIDATE_TABLES("datanucleus.schema.validateTables",
+         "datanucleus.schema.validateTables", false,
+         "validates existing schema against code. turn this on if you want to verify existing schema"),
+     WAREHOUSE("metastore.warehouse.dir", "hive.metastore.warehouse.dir", "/user/hive/warehouse",
+         "location of default database for the warehouse"),
+     WAREHOUSE_EXTERNAL("metastore.warehouse.external.dir",
+         "hive.metastore.warehouse.external.dir", "",
+         "Default location for external tables created in the warehouse. " +
+         "If not set or null, then the normal warehouse location will be used as the default location."),
+     WRITE_SET_REAPER_INTERVAL("metastore.writeset.reaper.interval",
+         "hive.writeset.reaper.interval", 60, TimeUnit.SECONDS,
+         "Frequency of WriteSet reaper runs"),
+     WM_DEFAULT_POOL_SIZE("metastore.wm.default.pool.size",
+         "hive.metastore.wm.default.pool.size", 4,
+         "The size of a default pool to create when creating an empty resource plan;\n" +
+         "If not positive, no default pool will be created."),
+     RAWSTORE_PARTITION_BATCH_SIZE("metastore.rawstore.batch.size",
+         "metastore.rawstore.batch.size", -1,
+         "Batch size for partition and other object retrieval from the underlying DB in JDO.\n" +
+         "The JDO implementation such as DataNucleus may run into issues when the generated queries are\n" +
+         "too large. Use this parameter to break the query into multiple batches. -1 means no batching."),
+ 
+     // Hive values we have copied and use as is
+     // These two are used to indicate that we are running tests
+     HIVE_IN_TEST("hive.in.test", "hive.in.test", false, "internal usage only, true in test mode"),
+     HIVE_IN_TEZ_TEST("hive.in.tez.test", "hive.in.tez.test", false,
+         "internal use only, true when in testing tez"),
+     // We need to track this as some listeners pass it through our config and we need to honor
+     // the system properties.
+     HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager",
+         "hive.security.authorization.manager",
+         "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory",
+         "The Hive client authorization manager class name. The user defined authorization class should implement \n" +
+             "interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."),
+     HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager",
+         "hive.security.metastore.authenticator.manager",
+         "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator",
+         "authenticator manager class name to be used in the metastore for authentication. \n" +
+             "The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."),
+     HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads",
+         "hive.security.metastore.authorization.auth.reads", true,
+         "If this is true, metastore authorizer authorizes read actions on database, table"),
+     // The metastore shouldn't care what txn manager Hive is running, but in various tests it
+     // needs to set these values.  We should do the work to detangle this.
+     HIVE_TXN_MANAGER("hive.txn.manager", "hive.txn.manager",
+         "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager",
+         "Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" +
+             "transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" +
+             "hive.compactor.worker.threads, hive.support.concurrency (true),\n" +
+             "and hive.exec.dynamic.partition.mode (nonstrict).\n" +
+             "The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" +
+             "no transactions."),
+     // Metastore always supports concurrency, but certain ACID tests depend on this being set.  We
+     // need to do the work to detangle this
+     HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", "hive.support.concurrency", false,
+         "Whether Hive supports concurrency control or not. \n" +
+             "A ZooKeeper instance must be up and running when using zookeeper Hive lock manager "),
++    HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", "hive.txn.stats.enabled", true,
++        "Whether Hive supports transactional stats (accurate stats for transactional tables)"),
+ 
+     // Deprecated Hive values that we are keeping for backwards compatibility.
+     @Deprecated
+     HIVE_CODAHALE_METRICS_REPORTER_CLASSES("hive.service.metrics.codahale.reporter.classes",
+         "hive.service.metrics.codahale.reporter.classes", "",
+         "Use METRICS_REPORTERS instead.  Comma separated list of reporter implementation classes " +
+             "for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides "
+             + "HIVE_METRICS_REPORTER conf if present.  This will be overridden by " +
+             "METRICS_REPORTERS if it is present"),
+     @Deprecated
+     HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "hive.service.metrics.reporter", "",
+         "Reporter implementations for metric class "
+             + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" +
+             "Deprecated, use METRICS_REPORTERS instead. This configuraiton will be"
+             + " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES and METRICS_REPORTERS if " +
+             "present. Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"),
+ 
+     // These are all values that we put here just for testing
+     STR_TEST_ENTRY("test.str", "hive.test.str", "defaultval", "comment"),
+     STR_SET_ENTRY("test.str.set", "hive.test.str.set", "a", new StringSetValidator("a", "b", "c"), ""),
+     STR_LIST_ENTRY("test.str.list", "hive.test.str.list", "a,b,c",
+         "no comment"),
+     LONG_TEST_ENTRY("test.long", "hive.test.long", 42, "comment"),
+     DOUBLE_TEST_ENTRY("test.double", "hive.test.double", 3.141592654, "comment"),
+     TIME_TEST_ENTRY("test.time", "hive.test.time", 1, TimeUnit.SECONDS, "comment"),
+     TIME_VALIDATOR_ENTRY_INCLUSIVE("test.time.validator.inclusive", "hive.test.time.validator.inclusive", 1,
+         TimeUnit.SECONDS,
+         new TimeValidator(TimeUnit.MILLISECONDS, 500L, true, 1500L, true), "comment"),
+     TIME_VALIDATOR_ENTRY_EXCLUSIVE("test.time.validator.exclusive", "hive.test.time.validator.exclusive", 1,
+         TimeUnit.SECONDS,
+         new TimeValidator(TimeUnit.MILLISECONDS, 500L, false, 1500L, false), "comment"),
+     BOOLEAN_TEST_ENTRY("test.bool", "hive.test.bool", true, "comment"),
+     CLASS_TEST_ENTRY("test.class", "hive.test.class", "", "comment");
+ 
+     private final String varname;
+     private final String hiveName;
+     private final Object defaultVal;
+     private final Validator validator;
+     private final boolean caseSensitive;
+     private final String description;
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, Validator validator,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, String defaultVal, boolean caseSensitive,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       this.caseSensitive = caseSensitive;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, Validator validator,
+              String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, boolean defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, double defaultVal, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = defaultVal;
+       validator = null;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = new TimeValue(defaultVal, unit);
+       validator = new TimeValidator(unit);
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     ConfVars(String varname, String hiveName, long defaultVal, TimeUnit unit,
+              Validator validator, String description) {
+       this.varname = varname;
+       this.hiveName = hiveName;
+       this.defaultVal = new TimeValue(defaultVal, unit);
+       this.validator = validator;
+       caseSensitive = false;
+       this.description = description;
+     }
+ 
+     public void validate(String value) throws IllegalArgumentException {
+       if (validator != null) {
+         validator.validate(value);
+       }
+     }
+ 
+     public boolean isCaseSensitive() {
+       return caseSensitive;
+     }
+ 
+     /**
+      * If you are calling this, you're probably doing it wrong.  You shouldn't need to use the
+      * underlying variable name.  Use one of the getVar methods instead.  Only use this if you
+      * are 100% sure you know what you're doing.  The reason for this is that MetastoreConf goes to a
+      * lot of trouble to make sure it checks both Hive and Metastore values for config keys.  If
+      * you call {@link Configuration#get(String)} you are undermining that.
+      * @return variable name
+      */
+     public String getVarname() {
+       return varname;
+     }
+ 
+     /**
+      * Use this method if you need to set a system property and are going to instantiate the
+      * configuration file via HiveConf.  This is because HiveConf only looks for values it knows,
+      * so it will miss all of the metastore.* ones.  Do not use this to explicitly set or get the
+      * underlying config value unless you are 100% sure you know what you're doing.
+      * The reason for this is that MetastoreConf goes to a
+      * lot of trouble to make sure it checks both Hive and Metastore values for config keys.  If
+      * you call {@link Configuration#get(String)} you are undermining that.
+      * @return hive.* configuration key
+      */
+     public String getHiveName() {
+       return hiveName;
+     }
+ 
+     public Object getDefaultVal() {
+       return defaultVal;
+     }
+ 
+     public String getDescription() {
+       return description;
+     }
+ 
+     /**
+      * This is useful if you need the variable name for a LOG message or
+      * {@link System#setProperty(String, String)}, beware however that you should only use this
+      * with setProperty if you're going to create a configuration via
+      * {@link MetastoreConf#newMetastoreConf()}.  If you are going to create it with HiveConf,
+      * then use {@link #getHiveName()}.
+      * @return metastore.* configuration key
+      */
+     @Override
+     public String toString() {
+       return varname;
+     }
+   }
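For illustration only (not part of this patch), a minimal sketch of how the three naming accessors above are meant to be used; WAREHOUSE_EXTERNAL and the path value are arbitrary examples:

    // toString() yields the metastore.* key, which newMetastoreConf() picks up from system properties.
    System.setProperty(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.toString(), "/tmp/ext_warehouse");
    Configuration conf = MetastoreConf.newMetastoreConf();
    // getHiveName() yields the hive.* key, for code paths that instantiate HiveConf instead.
    System.setProperty(MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL.getHiveName(), "/tmp/ext_warehouse");
    // Prefer the typed getters over Configuration#get(String) so both key families are consulted.
    String extWarehouse = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL);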
+ 
+   public static final ConfVars[] dataNucleusAndJdoConfs = {
+       ConfVars.AUTO_CREATE_ALL,
+       ConfVars.CONNECTION_DRIVER,
+       ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS,
+       ConfVars.CONNECTION_POOLING_TYPE,
+       ConfVars.CONNECT_URL_KEY,
+       ConfVars.CONNECTION_USER_NAME,
+       ConfVars.DATANUCLEUS_AUTOSTART,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2,
+       ConfVars.DATANUCLEUS_CACHE_LEVEL2_TYPE,
+       ConfVars.DATANUCLEUS_INIT_COL_INFO,
+       ConfVars.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK,
+       ConfVars.DATANUCLEUS_TRANSACTION_ISOLATION,
+       ConfVars.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY,
+       ConfVars.DETACH_ALL_ON_COMMIT,
+       ConfVars.IDENTIFIER_FACTORY,
+       ConfVars.MANAGER_FACTORY_CLASS,
+       ConfVars.MULTITHREADED,
+       ConfVars.NON_TRANSACTIONAL_READ,
+       ConfVars.PWD,
+       ConfVars.STORE_MANAGER_TYPE,
+       ConfVars.VALIDATE_COLUMNS,
+       ConfVars.VALIDATE_CONSTRAINTS,
+       ConfVars.VALIDATE_TABLES
+   };
+ 
+   // Make sure no one calls this
+   private MetastoreConf() {
+     throw new RuntimeException("You should never be creating one of these!");
+   }
+ 
+   public static void setHiveSiteLocation(URL location) {
+     hiveSiteURL = location;
+   }
+ 
+   public static Configuration newMetastoreConf() {
+     return newMetastoreConf(new Configuration());
+   }
+ 
+   public static Configuration newMetastoreConf(Configuration conf) {
+ 
+     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+     if (classLoader == null) {
+       classLoader = MetastoreConf.class.getClassLoader();
+     }
+     // We don't add this to the resources because we don't want to read config values from it.
+     // But we do find it because we want to remember where it is for later in case anyone calls
+     // getHiveDefaultLocation().
+     hiveDefaultURL = classLoader.getResource("hive-default.xml");
+ 
+     // Add in hive-site.xml.  We add this first so that it gets overridden by the new metastore
+     // specific files if they exist.
+     if(hiveSiteURL == null) {
+       /*
+        * this 'if' is pretty lame - QTestUtil.QTestUtil() uses hiveSiteURL to load a specific
+        * hive-site.xml from data/conf/<subdir> so this makes it follow the same logic - otherwise
+        * HiveConf and MetastoreConf may load different hive-site.xml  ( For example,
+        * HiveConf uses data/conf/spark/hive-site.xml and MetastoreConf data/conf/hive-site.xml)
+        */
+       hiveSiteURL = findConfigFile(classLoader, "hive-site.xml");
+     }
+     if (hiveSiteURL != null) {
+       conf.addResource(hiveSiteURL);
+     }
+ 
+     // Now add hivemetastore-site.xml.  Again we add this before our own config files so that the
+     // newer overrides the older.
+     hiveMetastoreSiteURL = findConfigFile(classLoader, "hivemetastore-site.xml");
+     if (hiveMetastoreSiteURL != null) {
+       conf.addResource(hiveMetastoreSiteURL);
+     }
+ 
+     // Add in our conf file
+     metastoreSiteURL = findConfigFile(classLoader, "metastore-site.xml");
+     if (metastoreSiteURL !=  null) {
+       conf.addResource(metastoreSiteURL);
+     }
+ 
+     // If a system property that matches one of our conf value names is set then use the value
+     // it's set to to set our own conf value.
+     for (ConfVars var : ConfVars.values()) {
+       if (System.getProperty(var.varname) != null) {
+         LOG.debug("Setting conf value " + var.varname + " using value " +
+             System.getProperty(var.varname));
+         conf.set(var.varname, System.getProperty(var.varname));
+       }
+     }
+ 
+     // Pick up any system properties that start with "hive." and set them in our config.  This
+     // way we can properly pull any Hive values from the environment without needing to know all
+     // of the Hive config values.
+     System.getProperties().stringPropertyNames().stream()
+         .filter(s -> s.startsWith("hive."))
+         .forEach(s -> {
+           String v = System.getProperty(s);
+           LOG.debug("Picking up system property " + s + " with value " + v);
+           conf.set(s, v);
+         });
+ 
+     // If we are going to validate the schema, make sure we don't create it
+     if (getBoolVar(conf, ConfVars.SCHEMA_VERIFICATION)) {
+       setBoolVar(conf, ConfVars.AUTO_CREATE_ALL, false);
+     }
+ 
+     if (!beenDumped.getAndSet(true) && getBoolVar(conf, ConfVars.DUMP_CONFIG_ON_CREATION) &&
+         LOG.isDebugEnabled()) {
+       LOG.debug(dumpConfig(conf));
+     }
+     return conf;
+   }
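As a hedged sketch of the precedence implemented above (site files first, then metastore.* system properties for known keys, then any hive.* system properties), assuming no conflicting values in hive-site.xml, hivemetastore-site.xml or metastore-site.xml:

    System.setProperty(ConfVars.WM_DEFAULT_POOL_SIZE.toString(), "8");
    System.setProperty("hive.txn.manager", "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
    Configuration conf = MetastoreConf.newMetastoreConf();
    long poolSize = MetastoreConf.getLongVar(conf, ConfVars.WM_DEFAULT_POOL_SIZE);  // 8
    String txnMgr = MetastoreConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER);          // DbTxnManager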
+ 
+   private static URL findConfigFile(ClassLoader classLoader, String name) {
+     // First, look in the classpath
+     URL result = classLoader.getResource(name);
+     if (result == null) {
+       // Nope, so look to see if our conf dir has been explicitly set
+       result = seeIfConfAtThisLocation("METASTORE_CONF_DIR", name, false);
+       if (result == null) {
+         // Nope, so look to see if our home dir has been explicitly set
+         result = seeIfConfAtThisLocation("METASTORE_HOME", name, true);
+         if (result == null) {
+           // Nope, so look to see if Hive's conf dir has been explicitly set
+           result = seeIfConfAtThisLocation("HIVE_CONF_DIR", name, false);
+           if (result == null) {
+             // Nope, so look to see if Hive's home dir has been explicitly set
+             result = seeIfConfAtThisLocation("HIVE_HOME", name, true);
+             if (result == null) {
+               // Nope, so look to see if we can find a conf file by finding our jar, going up one
+               // directory, and looking for a conf directory.
+               URI jarUri = null;
+               try {
+                 jarUri = MetastoreConf.class.getProtectionDomain().getCodeSource().getLocation().toURI();
+               } catch (Throwable e) {
+                 LOG.warn("Cannot get jar URI", e);
+               }
+               result = seeIfConfAtThisLocation(new File(jarUri).getParent(), name, true);
+               // At this point if we haven't found it, screw it, we don't know where it is
+               if (result == null) {
+                 LOG.info("Unable to find config file " + name);
+               }
+             }
+           }
+         }
+       }
+     }
+     LOG.info("Found configuration file " + result);
+     return result;
+   }
+ 
+   private static URL seeIfConfAtThisLocation(String envVar, String name, boolean inConfDir) {
+     String path = System.getenv(envVar);
+     if (path == null) {
+       // Workaround for testing since tests can't set the env vars.
+       path = System.getProperty(TEST_ENV_WORKAROUND + envVar);
+     }
+     if (path != null) {
+       String suffix = inConfDir ? "conf" + File.separatorChar + name : name;
+       return checkConfigFile(new File(path, suffix));
+     }
+     return null;
+   }
+ 
+   private static URL checkConfigFile(File f) {
+     try {
+       return (f.exists() && f.isFile()) ? f.toURI().toURL() : null;
+     } catch (Throwable e) {
+       LOG.warn("Error looking for config " + f, e);
+       return null;
+     }
+   }
+ 
+   // In all of the getters, we try the metastore value name first.  If it is not set we try the
+   // Hive value name.
+ 
+   /**
+    * Get the variable as a string
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static String getVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.get(var.hiveName, (String)var.defaultVal) : val;
+   }
+ 
+   /**
+    * Get the variable as a string
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @param defaultVal default to return if the variable is unset
+    * @return value, or default value passed in if the value is not in the config file
+    */
+   public static String getVar(Configuration conf, ConfVars var, String defaultVal) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.get(var.hiveName, defaultVal) : val;
+   }
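The comment above the getters describes the lookup order; a small illustrative sketch, assuming nothing else sets these keys:

    Configuration conf = MetastoreConf.newMetastoreConf();
    conf.set("hive.metastore.warehouse.external.dir", "/warehouse/ext");
    // Falls through to the hive.* key because the metastore.* key is unset.
    String loc = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE_EXTERNAL);   // "/warehouse/ext"
    conf.set("metastore.warehouse.external.dir", "/warehouse/ext2");
    // The metastore.* key now shadows the hive.* key.
    loc = MetastoreConf.getVar(conf, ConfVars.WAREHOUSE_EXTERNAL);          // "/warehouse/ext2"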
+ 
+   /**
+    * Treat a configuration value as a comma separated list.
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return collection of strings.  If the value is unset it will return an empty collection.
+    */
+   public static Collection<String> getStringCollection(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     if (val == null) {
+       val = conf.get(var.hiveName, (String)var.defaultVal);
+     }
+     if (val == null) {
+       return Collections.emptySet();
+     }
+     return StringUtils.asSet(val.split(","));
+   }
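A short sketch of getStringCollection using one of the test entries defined above; the default "a,b,c" is split on commas:

    Collection<String> items = MetastoreConf.getStringCollection(conf, ConfVars.STR_LIST_ENTRY);
    // items contains "a", "b", "c"; an unset value would yield an empty set.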
+ 
+   /**
+    * Set the variable as a string
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setVar(Configuration conf, ConfVars var, String val) {
+     assert var.defaultVal.getClass() == String.class;
+     conf.set(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as an int.  Note that all integer valued variables are stored as longs, thus
+    * this downcasts from a long to an int.
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static int getIntVar(Configuration conf, ConfVars var) {
+     long val = getLongVar(conf, var);
+     assert val <= Integer.MAX_VALUE;
+     return (int)val;
+   }
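Since integer valued entries are stored internally as longs, getIntVar simply narrows the value; a tiny sketch against the LONG_TEST_ENTRY defined above:

    long asLong = MetastoreConf.getLongVar(conf, ConfVars.LONG_TEST_ENTRY);  // 42L
    int asInt = MetastoreConf.getIntVar(conf, ConfVars.LONG_TEST_ENTRY);     // 42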
+ 
+   /**
+    * Get the variable as a long
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static long getLongVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Long.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getLong(var.hiveName, (Long)var.defaultVal) : Long.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a long
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setLongVar(Configuration conf, ConfVars var, long val) {
+     assert var.defaultVal.getClass() == Long.class;
+     conf.setLong(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as a boolean
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static boolean getBoolVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Boolean.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getBoolean(var.hiveName, (Boolean)var.defaultVal) : Boolean.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a boolean
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setBoolVar(Configuration conf, ConfVars var, boolean val) {
+     assert var.defaultVal.getClass() == Boolean.class;
+     conf.setBoolean(var.varname, val);
+   }
+ 
+   /**
+    * Get the variable as a double
+    * @param conf configuration to retrieve it from
+    * @param var variable to retrieve
+    * @return value, or default value if value not in config file
+    */
+   public static double getDoubleVar(Configuration conf, ConfVars var) {
+     assert var.defaultVal.getClass() == Double.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getDouble(var.hiveName, (Double)var.defaultVal) : Double.valueOf(val);
+   }
+ 
+   /**
+    * Set the variable as a double
+    * @param conf configuration file to set it in
+    * @param var variable to set
+    * @param val value to set it to
+    */
+   public static void setDoubleVar(Configuration conf, ConfVars var, double val) {
+     assert var.defaultVal.getClass() == Double.class;
+     conf.setDouble(var.varname, val);
+   }
+ 
+   public static long getSizeVar(Configuration conf, ConfVars var) {
+     return SizeValidator.toSizeBytes(getVar(conf, var));
+   }
+ 
+   /**
+    * Get a class instance based on a configuration value
+    * @param conf configuration file to retrieve it from
+    * @param var variable to retrieve
+    * @param defaultValue default class to return if the value isn't set
+    * @param xface interface that class must implement
+    * @param <I> interface that class implements
+    * @return instance of the class
+    */
+   public static <I> Class<? extends I> getClass(Configuration conf, ConfVars var,
+                                                 Class<? extends I> defaultValue,
+                                                 Class<I> xface) {
+     assert var.defaultVal.getClass() == String.class;
+     String val = conf.get(var.varname);
+     return val == null ? conf.getClass(var.hiveName, defaultValue, xface) :
+         conf.getClass(var.varname, def

<TRUNCATED>

[17/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 0000000,2bae133..6fcfbce
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@@ -1,0 -1,514 +1,621 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.ResultSetMetaData;
+ import java.sql.SQLException;
+ import java.sql.SQLTransactionRollbackException;
+ import java.sql.Statement;
+ import java.util.Properties;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ /**
+  * Utility methods for creating and destroying txn database/schema, plus methods for
+  * querying against metastore tables.
+  * Placed here in a separate class so it can be shared across unit tests.
+  */
+ public final class TxnDbUtil {
+ 
+   static final private Logger LOG = LoggerFactory.getLogger(TxnDbUtil.class.getName());
+   private static final String TXN_MANAGER = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
+ 
+   private static int deadlockCnt = 0;
+ 
+   private TxnDbUtil() {
+     throw new UnsupportedOperationException("Can't initialize class");
+   }
+ 
+   /**
+    * Set up the configuration so it will use the DbTxnManager and enable concurrency support,
+    * as the transaction and lock tests that share this utility expect.
+    *
+    * @param conf Configuration to add these values to
+    */
+   public static void setConfValues(Configuration conf) {
+     MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
+   }
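For illustration (not part of this patch), a sketch of the usual test lifecycle around this utility, assuming an embedded Derby metastore configured through MetastoreConf; the method name and query text are only examples:

    public static void runWithTxnSchema(Configuration conf) throws Exception {
      TxnDbUtil.setConfValues(conf);   // DbTxnManager + concurrency on
      TxnDbUtil.prepDb(conf);          // create the txn schema in the embedded DB
      try {
        // ... exercise the code under test, then assert against the txn tables, e.g.:
        int openTxns = TxnDbUtil.countQueryAgent(conf, "SELECT count(*) FROM TXNS");
      } finally {
        TxnDbUtil.cleanDb(conf);       // drop the txn tables so the next test starts clean
      }
    }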
+ 
+   public static void prepDb(Configuration conf) throws Exception {
+     // This is a bogus hack because it copies the contents of the SQL file
+     // intended for creating derby databases, and thus will inexorably get
+     // out of date with it.  I'm open to any suggestions on how to make this
+     // read the file in a build friendly way.
+ 
+     Connection conn = null;
+     Statement stmt = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       stmt.execute("CREATE TABLE TXNS (" +
+           "  TXN_ID bigint PRIMARY KEY," +
+           "  TXN_STATE char(1) NOT NULL," +
+           "  TXN_STARTED bigint NOT NULL," +
+           "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
+           "  TXN_USER varchar(128) NOT NULL," +
+           "  TXN_HOST varchar(128) NOT NULL," +
+           "  TXN_TYPE integer)");
+ 
+       stmt.execute("CREATE TABLE TXN_COMPONENTS (" +
+           "  TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID)," +
+           "  TC_DATABASE varchar(128) NOT NULL," +
+           "  TC_TABLE varchar(128)," +
+           "  TC_PARTITION varchar(767)," +
+           "  TC_OPERATION_TYPE char(1) NOT NULL," +
+           "  TC_WRITEID bigint)");
+       stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+           "  CTC_TXNID bigint NOT NULL," +
+           "  CTC_DATABASE varchar(128) NOT NULL," +
+           "  CTC_TABLE varchar(128)," +
+           "  CTC_PARTITION varchar(767)," +
+           "  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," +
+           "  CTC_WRITEID bigint," +
+           "  CTC_UPDATE_DELETE char(1) NOT NULL)");
+       stmt.execute("CREATE TABLE NEXT_TXN_ID (" + "  NTXN_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" +
+           " T2W_TXNID bigint NOT NULL," +
+           " T2W_DATABASE varchar(128) NOT NULL," +
+           " T2W_TABLE varchar(256) NOT NULL," +
+           " T2W_WRITEID bigint NOT NULL)");
+       stmt.execute("CREATE TABLE NEXT_WRITE_ID (" +
+           " NWI_DATABASE varchar(128) NOT NULL," +
+           " NWI_TABLE varchar(256) NOT NULL," +
+           " NWI_NEXT bigint NOT NULL)");
+ 
+       stmt.execute("CREATE TABLE MIN_HISTORY_LEVEL (" +
+           " MHL_TXNID bigint NOT NULL," +
+           " MHL_MIN_OPEN_TXNID bigint NOT NULL," +
+           " PRIMARY KEY(MHL_TXNID))");
+ 
+       stmt.execute("CREATE TABLE HIVE_LOCKS (" +
+           " HL_LOCK_EXT_ID bigint NOT NULL," +
+           " HL_LOCK_INT_ID bigint NOT NULL," +
+           " HL_TXNID bigint NOT NULL," +
+           " HL_DB varchar(128) NOT NULL," +
+           " HL_TABLE varchar(128)," +
+           " HL_PARTITION varchar(767)," +
+           " HL_LOCK_STATE char(1) NOT NULL," +
+           " HL_LOCK_TYPE char(1) NOT NULL," +
+           " HL_LAST_HEARTBEAT bigint NOT NULL," +
+           " HL_ACQUIRED_AT bigint," +
+           " HL_USER varchar(128) NOT NULL," +
+           " HL_HOST varchar(128) NOT NULL," +
+           " HL_HEARTBEAT_COUNT integer," +
+           " HL_AGENT_INFO varchar(128)," +
+           " HL_BLOCKEDBY_EXT_ID bigint," +
+           " HL_BLOCKEDBY_INT_ID bigint," +
+         " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+       stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+ 
+       stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPACTION_QUEUE (" +
+           " CQ_ID bigint PRIMARY KEY," +
+           " CQ_DATABASE varchar(128) NOT NULL," +
+           " CQ_TABLE varchar(128) NOT NULL," +
+           " CQ_PARTITION varchar(767)," +
+           " CQ_STATE char(1) NOT NULL," +
+           " CQ_TYPE char(1) NOT NULL," +
+           " CQ_TBLPROPERTIES varchar(2048)," +
+           " CQ_WORKER_ID varchar(128)," +
+           " CQ_START bigint," +
+           " CQ_RUN_AS varchar(128)," +
+           " CQ_HIGHEST_WRITE_ID bigint," +
+           " CQ_META_INFO varchar(2048) for bit data," +
+           " CQ_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" +
+           " CC_ID bigint PRIMARY KEY," +
+           " CC_DATABASE varchar(128) NOT NULL," +
+           " CC_TABLE varchar(128) NOT NULL," +
+           " CC_PARTITION varchar(767)," +
+           " CC_STATE char(1) NOT NULL," +
+           " CC_TYPE char(1) NOT NULL," +
+           " CC_TBLPROPERTIES varchar(2048)," +
+           " CC_WORKER_ID varchar(128)," +
+           " CC_START bigint," +
+           " CC_END bigint," +
+           " CC_RUN_AS varchar(128)," +
+           " CC_HIGHEST_WRITE_ID bigint," +
+           " CC_META_INFO varchar(2048) for bit data," +
+           " CC_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE AUX_TABLE (" +
+         " MT_KEY1 varchar(128) NOT NULL," +
+         " MT_KEY2 bigint NOT NULL," +
+         " MT_COMMENT varchar(255)," +
+         " PRIMARY KEY(MT_KEY1, MT_KEY2))");
+ 
+       stmt.execute("CREATE TABLE WRITE_SET (" +
+         " WS_DATABASE varchar(128) NOT NULL," +
+         " WS_TABLE varchar(128) NOT NULL," +
+         " WS_PARTITION varchar(767)," +
+         " WS_TXNID bigint NOT NULL," +
+         " WS_COMMIT_ID bigint NOT NULL," +
+         " WS_OPERATION_TYPE char(1) NOT NULL)"
+       );
+ 
+       stmt.execute("CREATE TABLE REPL_TXN_MAP (" +
+           " RTM_REPL_POLICY varchar(256) NOT NULL, " +
+           " RTM_SRC_TXN_ID bigint NOT NULL, " +
+           " RTM_TARGET_TXN_ID bigint NOT NULL, " +
+           " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
+       );
+ 
+       stmt.execute("CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (" +
+           "  MRL_TXN_ID BIGINT NOT NULL, " +
+           "  MRL_DB_NAME VARCHAR(128) NOT NULL, " +
+           "  MRL_TBL_NAME VARCHAR(256) NOT NULL, " +
+           "  MRL_LAST_HEARTBEAT BIGINT NOT NULL, " +
+           "  PRIMARY KEY(MRL_TXN_ID))"
+       );
+ 
+       try {
++        stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
++            " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
++            " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
++            " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
++            " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
++            " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (TBL_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TBLS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"DBS\" (\"DB_ID\" BIGINT NOT NULL, \"DESC\" " +
++            "VARCHAR(4000), \"DB_LOCATION_URI\" VARCHAR(4000) NOT NULL, \"NAME\" VARCHAR(128), " +
++            "\"OWNER_NAME\" VARCHAR(128), \"OWNER_TYPE\" VARCHAR(10), " +
++            "\"CTLG_NAME\" VARCHAR(256) NOT NULL, PRIMARY KEY (DB_ID))");
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TBLS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
++            " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
++            " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (PART_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITIONS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
++            " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" CLOB, " +
++            " PRIMARY KEY (TBL_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TABLE_PARAMS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" VARCHAR(4000), " +
++            " PRIMARY KEY (PART_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITION_PARAMS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
+         stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
+ 
+                 "NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("SEQUENCE_TABLE table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\" BIGINT NOT NULL, " +
+ 
+                 "\"NEXT_EVENT_ID\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_SEQUENCE table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_LOG\" (\"NL_ID\" BIGINT NOT NULL, " +
+                 "\"DB_NAME\" VARCHAR(128), \"EVENT_ID\" BIGINT NOT NULL, \"EVENT_TIME\" INTEGER NOT" +
+ 
+                 " NULL, \"EVENT_TYPE\" VARCHAR(32) NOT NULL, \"MESSAGE\" CLOB, \"TBL_NAME\" " +
+                 "VARCHAR" +
+                 "(256), \"MESSAGE_FORMAT\" VARCHAR(16))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_LOG table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MNotificationLog')");
+ 
+       stmt.execute("INSERT INTO \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\", \"NEXT_EVENT_ID\")" +
+               " SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT " +
+               "\"NEXT_EVENT_ID\" FROM \"APP\".\"NOTIFICATION_SEQUENCE\")");
+ 
+       try {
+         stmt.execute("CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (" +
+                 "WNL_ID bigint NOT NULL," +
+                 "WNL_TXNID bigint NOT NULL," +
+                 "WNL_WRITEID bigint NOT NULL," +
+                 "WNL_DATABASE varchar(128) NOT NULL," +
+                 "WNL_TABLE varchar(128) NOT NULL," +
+                 "WNL_PARTITION varchar(1024) NOT NULL," +
+                 "WNL_TABLE_OBJ clob NOT NULL," +
+                 "WNL_PARTITION_OBJ clob," +
+                 "WNL_FILES clob," +
+                 "WNL_EVENT_TIME integer NOT NULL," +
+                 "PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("TXN_WRITE_NOTIFICATION_LOG table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MTxnWriteNotificationLog')");
+     } catch (SQLException e) {
+       try {
+         conn.rollback();
+       } catch (SQLException re) {
+         LOG.error("Error rolling back: " + re.getMessage());
+       }
+ 
+       // Another thread might have already created these tables.
+       if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+         LOG.info("Txn tables already exist, returning");
+         return;
+       }
+ 
+       // This might be a deadlock, if so, let's retry
+       if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
+         LOG.warn("Caught deadlock, retrying db creation");
+         prepDb(conf);
+       } else {
+         throw e;
+       }
+     } finally {
+       deadlockCnt = 0;
+       closeResources(conn, stmt, null);
+     }
+   }
+ 
+   public static void cleanDb(Configuration conf) throws Exception {
+     int retryCount = 0;
+     while(++retryCount <= 3) {
+       boolean success = true;
+       Connection conn = null;
+       Statement stmt = null;
+       try {
+         conn = getConnection(conf);
+         stmt = conn.createStatement();
+ 
+         // We want to try these, whether they succeed or fail.
+         try {
+           stmt.execute("DROP INDEX HL_TXNID_INDEX");
+         } catch (SQLException e) {
+           if(!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
+             //42X65/30000 means index doesn't exist
+             LOG.error("Unable to drop index HL_TXNID_INDEX " + e.getMessage() +
+               "State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+             success = false;
+           }
+         }
+ 
+         success &= dropTable(stmt, "TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "TXNS", retryCount);
+         success &= dropTable(stmt, "NEXT_TXN_ID", retryCount);
+         success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "MIN_HISTORY_LEVEL", retryCount);
+         success &= dropTable(stmt, "HIVE_LOCKS", retryCount);
+         success &= dropTable(stmt, "NEXT_LOCK_ID", retryCount);
+         success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount);
+         success &= dropTable(stmt, "NEXT_COMPACTION_QUEUE_ID", retryCount);
+         success &= dropTable(stmt, "COMPLETED_COMPACTIONS", retryCount);
+         success &= dropTable(stmt, "AUX_TABLE", retryCount);
+         success &= dropTable(stmt, "WRITE_SET", retryCount);
+         success &= dropTable(stmt, "REPL_TXN_MAP", retryCount);
+         success &= dropTable(stmt, "MATERIALIZATION_REBUILD_LOCKS", retryCount);
+         /*
+          * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE, as they are used
+          * by other, non-txn tables to generate primary keys. If these tables were dropped while the
+          * other tables were not, duplicate key errors would occur when inserting into those tables.
+          */
+       } finally {
+         closeResources(conn, stmt, null);
+       }
+       if(success) {
+         return;
+       }
+     }
+     throw new RuntimeException("Failed to clean up txn tables");
+   }
+ 
+   private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
+     for (int i = 0; i < 3; i++) {
+       try {
+         stmt.execute("DROP TABLE " + name);
+         LOG.debug("Successfully dropped table " + name);
+         return true;
+       } catch (SQLException e) {
+         if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           LOG.debug("Not dropping " + name + " because it doesn't exist");
+           //failed because object doesn't exist
+           return true;
+         }
+         if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           // Intermittent failure
+           LOG.warn("Intermittent drop failure, retrying, try number " + i);
+           continue;
+         }
+         LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
+             " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+       }
+     }
+     LOG.error("Failed to drop table, don't know why");
+     return false;
+   }
+ 
+   /**
+    * A tool to count the number of partitions, tables,
+    * and databases locked by a particular lockId.
+    *
+    * @param lockId lock id to look for lock components
+    *
+    * @return number of components, or 0 if there is no lock
+    */
+   public static int countLockComponents(Configuration conf, long lockId) throws Exception {
+     Connection conn = null;
+     PreparedStatement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?");
+       stmt.setLong(1, lockId);
+       rs = stmt.executeQuery();
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+ 
+   /**
++   * Return true if the transaction with the given txnId is open or aborted, i.e. a row for it
++   * still exists in the TXNS table.
++   * @param conf    Configuration pointing at the metastore DB
++   * @param txnId   transaction id to search for
++   * @return true if the transaction is open or aborted, false otherwise
++   * @throws Exception if the metastore DB cannot be queried
++   */
++  public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception {
++    Connection conn = null;
++    PreparedStatement stmt = null;
++    ResultSet rs = null;
++    try {
++      conn = getConnection(conf);
++      conn.setAutoCommit(false);
++      conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
++
++      stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?");
++      stmt.setLong(1, txnId);
++      rs = stmt.executeQuery();
++      return rs.next();
++    } finally {
++      closeResources(conn, stmt, rs);
++    }
++  }
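A sketch of how a test might use the new helper; the txn id is arbitrary, and the interpretation follows the method name (a row in TXNS means the transaction is open or aborted):

    boolean stillPresent = TxnDbUtil.isOpenOrAbortedTransaction(conf, 5L);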
++
++  /**
+    * Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables
+    * @param countQuery countQuery text
+    * @return count countQuery result
+    * @throws Exception
+    */
+   public static int countQueryAgent(Configuration conf, String countQuery) throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(countQuery);
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+   @VisibleForTesting
+   public static String queryToString(Configuration conf, String query) throws Exception {
+     return queryToString(conf, query, true);
+   }
+   public static String queryToString(Configuration conf, String query, boolean includeHeader)
+       throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     StringBuilder sb = new StringBuilder();
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(query);
+       ResultSetMetaData rsmd = rs.getMetaData();
+       if(includeHeader) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rsmd.getColumnName(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+       while(rs.next()) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rs.getObject(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+     return sb.toString();
+   }
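A small sketch of the two query helpers for inspecting test state; the query strings are arbitrary examples:

    int completed = TxnDbUtil.countQueryAgent(conf,
        "SELECT count(*) FROM COMPLETED_TXN_COMPONENTS WHERE ctc_table = 'mytable'");
    System.out.println(TxnDbUtil.queryToString(conf, "SELECT * FROM HIVE_LOCKS"));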
+ 
+   static Connection getConnection(Configuration conf) throws Exception {
+     String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
+     Driver driver = (Driver) Class.forName(jdbcDriver).newInstance();
+     Properties prop = new Properties();
+     String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY);
+     String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
+     String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+     prop.setProperty("user", user);
+     prop.setProperty("password", passwd);
+     Connection conn = driver.connect(driverUrl, prop);
+     conn.setAutoCommit(true);
+     return conn;
+   }
+ 
+   static void closeResources(Connection conn, Statement stmt, ResultSet rs) {
+     if (rs != null) {
+       try {
+         rs.close();
+       } catch (SQLException e) {
+         LOG.error("Error closing ResultSet: " + e.getMessage());
+       }
+     }
+ 
+     if (stmt != null) {
+       try {
+         stmt.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Statement: " + e.getMessage());
+       }
+     }
+ 
+     if (conn != null) {
+       try {
+         conn.rollback();
+       } catch (SQLException e) {
+         System.err.println("Error rolling back: " + e.getMessage());
+       }
+       try {
+         conn.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Connection: " + e.getMessage());
+       }
+     }
+   }
+ }


[46/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723 addendum

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723 addendum


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8047dd88
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8047dd88
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8047dd88

Branch: refs/heads/master
Commit: 8047dd882bfa706ae96f18e2fe0a04c818850d14
Parents: d2c60f3 5e7aa09
Author: sergey <se...@apache.org>
Authored: Mon Jul 23 11:51:14 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Mon Jul 23 11:51:14 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/ql/plan/ExplainLockDesc.java    | 116 +++++++++++++++++++
 1 file changed, 116 insertions(+)
----------------------------------------------------------------------



[37/50] [abbrv] hive git commit: HIVE-20219 : verify that analyze and analyze for columns manage txn stats state correctly (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-20219 : verify that analyze and analyze for columns manage txn stats state correctly (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/31ee8704
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/31ee8704
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/31ee8704

Branch: refs/heads/master
Commit: 31ee8704e0dc5f8b9ef9530dbff4d3d1332be863
Parents: f2d5ac2
Author: sergey <se...@apache.org>
Authored: Fri Jul 20 17:41:32 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Fri Jul 20 17:41:32 2018 -0700

----------------------------------------------------------------------
 ql/src/test/queries/clientpositive/acid_stats.q |  15 -
 .../test/queries/clientpositive/acid_stats5.q   |  68 ++
 .../results/clientpositive/acid_stats.q.out     | 140 ---
 .../results/clientpositive/acid_stats5.q.out    | 849 +++++++++++++++++++
 4 files changed, 917 insertions(+), 155 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/31ee8704/ql/src/test/queries/clientpositive/acid_stats.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_stats.q b/ql/src/test/queries/clientpositive/acid_stats.q
index 15eb930..8fed89a 100644
--- a/ql/src/test/queries/clientpositive/acid_stats.q
+++ b/ql/src/test/queries/clientpositive/acid_stats.q
@@ -30,18 +30,3 @@ explain select count(key) from stats_part;
 
 drop table stats_part;
 
--- test the case where we insert without updating stats... just in case
-
-create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
-insert into table stats2  values (1, "foo");
-explain select count(*) from stats2;
-insert into table stats2  values (2, "bar");
-explain select count(*) from stats2;
-desc formatted stats2 key;
-
-set hive.stats.autogather=false;
-set hive.stats.column.autogather=false;
-insert into table stats2  values (1, "baz");
-explain select count(*) from stats2;
-
-drop table stats2;

http://git-wip-us.apache.org/repos/asf/hive/blob/31ee8704/ql/src/test/queries/clientpositive/acid_stats5.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/acid_stats5.q b/ql/src/test/queries/clientpositive/acid_stats5.q
new file mode 100644
index 0000000..be6a581
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/acid_stats5.q
@@ -0,0 +1,68 @@
+set hive.stats.dbclass=fs;
+set hive.stats.fetch.column.stats=true;
+set datanucleus.cache.collections=false;
+
+set hive.merge.mapfiles=false;
+set hive.merge.mapredfiles=false;
+
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+set hive.compute.query.using.stats=true;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.fetch.task.conversion=none;
+set hive.support.concurrency=true;
+set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
+set hive.query.results.cache.enabled=false;
+
+-- Test various scenarios where stats become invalid; verify they are invalid, and that analyze works.
+
+create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only");
+insert into table stats2  values (1, "foo");
+insert into table stats2  values (2, "bar");
+explain select count(*) from stats2;
+explain select min(key) from stats2;
+
+set hive.stats.autogather=false;
+set hive.stats.column.autogather=false;
+insert into table stats2  values (3, "baz");
+set hive.stats.autogather=true;
+set hive.stats.column.autogather=true;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select count(distinct key) from stats2;
+
+analyze table stats2 compute statistics;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select min(key) from stats2;
+
+analyze table stats2 compute statistics for columns;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select min(key) from stats2;
+
+
+truncate table stats2;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select count(distinct key) from stats2;
+
+analyze table stats2 compute statistics;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select min(key) from stats2;
+
+analyze table stats2 compute statistics for columns;
+desc formatted stats2;
+desc formatted stats2 key;
+explain select count(*) from stats2;
+explain select min(key) from stats2;
+
+drop table stats2;

http://git-wip-us.apache.org/repos/asf/hive/blob/31ee8704/ql/src/test/results/clientpositive/acid_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_stats.q.out b/ql/src/test/results/clientpositive/acid_stats.q.out
index 8dcfdfb..02dee99 100644
--- a/ql/src/test/results/clientpositive/acid_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_stats.q.out
@@ -84,143 +84,3 @@ POSTHOOK: query: drop table stats_part
 POSTHOOK: type: DROPTABLE
 POSTHOOK: Input: default@stats_part
 POSTHOOK: Output: default@stats_part
-PREHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@stats2
-POSTHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@stats2
-PREHOOK: query: insert into table stats2  values (1, "foo")
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@stats2
-POSTHOOK: query: insert into table stats2  values (1, "foo")
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@stats2
-POSTHOOK: Lineage: stats2.key SCRIPT []
-POSTHOOK: Lineage: stats2.value SCRIPT []
-PREHOOK: query: explain select count(*) from stats2
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from stats2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: insert into table stats2  values (2, "bar")
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@stats2
-POSTHOOK: query: insert into table stats2  values (2, "bar")
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@stats2
-POSTHOOK: Lineage: stats2.key SCRIPT []
-POSTHOOK: Lineage: stats2.value SCRIPT []
-PREHOOK: query: explain select count(*) from stats2
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from stats2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-0 is a root stage
-
-STAGE PLANS:
-  Stage: Stage-0
-    Fetch Operator
-      limit: 1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: desc formatted stats2 key
-PREHOOK: type: DESCTABLE
-PREHOOK: Input: default@stats2
-POSTHOOK: query: desc formatted stats2 key
-POSTHOOK: type: DESCTABLE
-POSTHOOK: Input: default@stats2
-col_name            	key                 	 	 	 	 	 	 	 	 	 	 
-data_type           	int                 	 	 	 	 	 	 	 	 	 	 
-min                 	1                   	 	 	 	 	 	 	 	 	 	 
-max                 	2                   	 	 	 	 	 	 	 	 	 	 
-num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
-distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
-avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
-max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
-num_trues           	                    	 	 	 	 	 	 	 	 	 	 
-num_falses          	                    	 	 	 	 	 	 	 	 	 	 
-bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
-comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
-COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
-PREHOOK: query: insert into table stats2  values (1, "baz")
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@stats2
-POSTHOOK: query: insert into table stats2  values (1, "baz")
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@stats2
-POSTHOOK: Lineage: stats2.key SCRIPT []
-POSTHOOK: Lineage: stats2.value SCRIPT []
-PREHOOK: query: explain select count(*) from stats2
-PREHOOK: type: QUERY
-POSTHOOK: query: explain select count(*) from stats2
-POSTHOOK: type: QUERY
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: stats2
-            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-            Select Operator
-              Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                aggregations: count()
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  sort order: 
-                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-                  value expressions: _col0 (type: bigint)
-      Execution mode: vectorized
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: drop table stats2
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@stats2
-PREHOOK: Output: default@stats2
-POSTHOOK: query: drop table stats2
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@stats2
-POSTHOOK: Output: default@stats2

http://git-wip-us.apache.org/repos/asf/hive/blob/31ee8704/ql/src/test/results/clientpositive/acid_stats5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_stats5.q.out b/ql/src/test/results/clientpositive/acid_stats5.q.out
new file mode 100644
index 0000000..99b19bb
--- /dev/null
+++ b/ql/src/test/results/clientpositive/acid_stats5.q.out
@@ -0,0 +1,849 @@
+PREHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@stats2
+POSTHOOK: query: create table stats2(key int,value string) tblproperties ("transactional"="true", "transactional_properties"="insert_only")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@stats2
+PREHOOK: query: insert into table stats2  values (1, "foo")
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@stats2
+POSTHOOK: query: insert into table stats2  values (1, "foo")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@stats2
+POSTHOOK: Lineage: stats2.key SCRIPT []
+POSTHOOK: Lineage: stats2.value SCRIPT []
+PREHOOK: query: insert into table stats2  values (2, "bar")
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@stats2
+POSTHOOK: query: insert into table stats2  values (2, "bar")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@stats2
+POSTHOOK: Lineage: stats2.key SCRIPT []
+POSTHOOK: Lineage: stats2.value SCRIPT []
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select min(key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select min(key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: insert into table stats2  values (3, "baz")
+PREHOOK: type: QUERY
+PREHOOK: Input: _dummy_database@_dummy_table
+PREHOOK: Output: default@stats2
+POSTHOOK: query: insert into table stats2  values (3, "baz")
+POSTHOOK: type: QUERY
+POSTHOOK: Input: _dummy_database@_dummy_table
+POSTHOOK: Output: default@stats2
+POSTHOOK: Lineage: stats2.key SCRIPT []
+POSTHOOK: Lineage: stats2.value SCRIPT []
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	bucketing_version   	2                   
+	numFiles            	3                   
+	numRows             	2                   
+	rawDataSize         	10                  
+	totalSize           	18                  
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	2                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: stats2
+            Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              Statistics: Num rows: 2 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: count()
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(distinct key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(distinct key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: stats2
+            Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: partial2
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Group By Operator
+            aggregations: count(_col0)
+            mode: partial2
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: analyze table stats2 compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats2
+PREHOOK: Output: default@stats2
+POSTHOOK: query: analyze table stats2 compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats2
+POSTHOOK: Output: default@stats2
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	bucketing_version   	2                   
+	numFiles            	3                   
+	numRows             	3                   
+	rawDataSize         	15                  
+	totalSize           	18                  
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	2                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	2                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select min(key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select min(key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: stats2
+            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: min(key)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: int)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: analyze table stats2 compute statistics for columns
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@stats2
+PREHOOK: Output: default@stats2
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats2 compute statistics for columns
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@stats2
+POSTHOOK: Output: default@stats2
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	bucketing_version   	2                   
+	numFiles            	3                   
+	numRows             	3                   
+	rawDataSize         	15                  
+	totalSize           	18                  
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	3                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	3                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select min(key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select min(key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: truncate table stats2
+PREHOOK: type: TRUNCATETABLE
+PREHOOK: Output: default@stats2
+POSTHOOK: query: truncate table stats2
+POSTHOOK: type: TRUNCATETABLE
+POSTHOOK: Output: default@stats2
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	bucketing_version   	2                   
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	3                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	3                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select count(distinct key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(distinct key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: stats2
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                keys: key (type: int)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  key expressions: _col0 (type: int)
+                  sort order: +
+                  Map-reduce partition columns: _col0 (type: int)
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          keys: KEY._col0 (type: int)
+          mode: partial2
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          Group By Operator
+            aggregations: count(_col0)
+            mode: partial2
+            outputColumnNames: _col0
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Reduce Output Operator
+              sort order: 
+              Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+              value expressions: _col0 (type: bigint)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: analyze table stats2 compute statistics
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stats2
+PREHOOK: Output: default@stats2
+POSTHOOK: query: analyze table stats2 compute statistics
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stats2
+POSTHOOK: Output: default@stats2
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}
+	bucketing_version   	2                   
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	3                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	3                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	HL                  	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\"}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select min(key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select min(key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 depends on stages: Stage-1
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: stats2
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            Select Operator
+              expressions: key (type: int)
+              outputColumnNames: key
+              Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+              Group By Operator
+                aggregations: min(key)
+                mode: hash
+                outputColumnNames: _col0
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                Reduce Output Operator
+                  sort order: 
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+                  value expressions: _col0 (type: int)
+      Execution mode: vectorized
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: min(VALUE._col0)
+          mode: mergepartial
+          outputColumnNames: _col0
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+          File Output Operator
+            compressed: false
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
+            table:
+                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: analyze table stats2 compute statistics for columns
+PREHOOK: type: ANALYZE_TABLE
+PREHOOK: Input: default@stats2
+PREHOOK: Output: default@stats2
+#### A masked pattern was here ####
+POSTHOOK: query: analyze table stats2 compute statistics for columns
+POSTHOOK: type: ANALYZE_TABLE
+POSTHOOK: Input: default@stats2
+POSTHOOK: Output: default@stats2
+#### A masked pattern was here ####
+PREHOOK: query: desc formatted stats2
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+# col_name            	data_type           	comment             
+key                 	int                 	                    
+value               	string              	                    
+	 	 
+# Detailed Table Information	 	 
+Database:           	default             	 
+#### A masked pattern was here ####
+Retention:          	0                   	 
+#### A masked pattern was here ####
+Table Type:         	MANAGED_TABLE       	 
+Table Parameters:	 	 
+	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
+	bucketing_version   	2                   
+	numFiles            	0                   
+	numRows             	0                   
+	rawDataSize         	0                   
+	totalSize           	0                   
+	transactional       	true                
+	transactional_properties	insert_only         
+#### A masked pattern was here ####
+	 	 
+# Storage Information	 	 
+SerDe Library:      	org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe	 
+InputFormat:        	org.apache.hadoop.mapred.TextInputFormat	 
+OutputFormat:       	org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat	 
+Compressed:         	No                  	 
+Num Buckets:        	-1                  	 
+Bucket Columns:     	[]                  	 
+Sort Columns:       	[]                  	 
+Storage Desc Params:	 	 
+	serialization.format	1                   
+PREHOOK: query: desc formatted stats2 key
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@stats2
+POSTHOOK: query: desc formatted stats2 key
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@stats2
+col_name            	key                 	 	 	 	 	 	 	 	 	 	 
+data_type           	int                 	 	 	 	 	 	 	 	 	 	 
+min                 	1                   	 	 	 	 	 	 	 	 	 	 
+max                 	3                   	 	 	 	 	 	 	 	 	 	 
+num_nulls           	0                   	 	 	 	 	 	 	 	 	 	 
+distinct_count      	0                   	 	 	 	 	 	 	 	 	 	 
+avg_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+max_col_len         	                    	 	 	 	 	 	 	 	 	 	 
+num_trues           	                    	 	 	 	 	 	 	 	 	 	 
+num_falses          	                    	 	 	 	 	 	 	 	 	 	 
+bitVector           	                    	 	 	 	 	 	 	 	 	 	 
+comment             	from deserializer   	 	 	 	 	 	 	 	 	 	 
+COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}	 	 	 	 	 	 	 	 	 	 
+PREHOOK: query: explain select count(*) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select count(*) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: explain select min(key) from stats2
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select min(key) from stats2
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: 1
+      Processor Tree:
+        ListSink
+
+PREHOOK: query: drop table stats2
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@stats2
+PREHOOK: Output: default@stats2
+POSTHOOK: query: drop table stats2
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@stats2
+POSTHOOK: Output: default@stats2


[34/50] [abbrv] hive git commit: HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cdb32a7f
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cdb32a7f
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cdb32a7f

Branch: refs/heads/master
Commit: cdb32a7fbabc9baea535b94da159965eda4e23a8
Parents: 651e795
Author: sergey <se...@apache.org>
Authored: Thu Jul 19 15:48:39 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Thu Jul 19 15:48:39 2018 -0700

----------------------------------------------------------------------
 .../listener/DummyRawStoreFailEvent.java        |  37 ++-
 .../metastore/SynchronizedMetaStoreClient.java  |   4 +-
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  23 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  35 +--
 .../ql/metadata/SessionHiveMetaStoreClient.java |   9 +-
 .../hive/ql/optimizer/StatsOptimizer.java       |   4 -
 .../hadoop/hive/ql/stats/ColStatsProcessor.java |   1 -
 .../hive/ql/stats/StatsUpdaterThread.java       |   4 +-
 .../apache/hadoop/hive/ql/TestTxnCommands.java  |   2 +-
 .../hive/ql/stats/TestStatsUpdaterThread.java   |  34 +--
 .../metastore/api/AddPartitionsRequest.java     | 117 +--------
 .../metastore/api/AlterPartitionsRequest.java   | 131 ++--------
 .../hive/metastore/api/AlterTableRequest.java   | 131 ++--------
 .../hive/metastore/api/GetTableRequest.java     | 113 +--------
 .../metastore/api/PartitionsStatsRequest.java   | 121 +--------
 .../metastore/api/RenamePartitionRequest.java   | 121 +--------
 .../api/SetPartitionsStatsRequest.java          | 131 ++--------
 .../hive/metastore/api/TableStatsRequest.java   | 121 +--------
 .../metastore/api/TruncateTableRequest.java     | 131 ++--------
 .../src/gen/thrift/gen-php/metastore/Types.php  | 247 ++-----------------
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  | 192 +++-----------
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |  42 +---
 .../src/main/thrift/hive_metastore.thrift       |  33 +--
 .../hadoop/hive/metastore/AlterHandler.java     |   8 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |  52 ++--
 .../hadoop/hive/metastore/HiveMetaStore.java    | 133 +++++-----
 .../hive/metastore/HiveMetaStoreClient.java     |  63 ++---
 .../hadoop/hive/metastore/IHMSHandler.java      |   2 +-
 .../hadoop/hive/metastore/IMetaStoreClient.java |  30 +--
 .../hadoop/hive/metastore/ObjectStore.java      |  92 +++----
 .../apache/hadoop/hive/metastore/RawStore.java  |  20 +-
 .../hive/metastore/cache/CachedStore.java       |  56 ++---
 .../DummyRawStoreControlledCommit.java          |  38 +--
 .../DummyRawStoreForJdoConnection.java          |  22 +-
 .../HiveMetaStoreClientPreCatalog.java          |  41 +--
 .../InjectableBehaviourObjectStore.java         |   5 +-
 .../hive/metastore/TestHiveAlterHandler.java    |   6 +-
 .../hadoop/hive/metastore/TestObjectStore.java  |   4 +-
 .../hadoop/hive/metastore/TestOldSchema.java    |   2 +-
 .../hive/metastore/cache/TestCachedStore.java   |  18 +-
 .../metastore/client/TestAlterPartitions.java   |   4 +-
 41 files changed, 506 insertions(+), 1874 deletions(-)
----------------------------------------------------------------------
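The common thread across the file diffs below is that the long txnId argument is dropped from the stats-related metastore calls; the ValidWriteIdList string alone now identifies the snapshot. A minimal caller-side sketch of that signature change (illustrative only; the TxnStatsCallerSketch class and readTable method are made up for this note and are not part of the patch):

import org.apache.hadoop.hive.metastore.RawStore;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Table;

/** Illustrative only -- not part of this patch. */
public class TxnStatsCallerSketch {
  /**
   * Reads a table at a given snapshot. Before HIVE-20047 callers passed
   * (txnId, validWriteIdList); after the patch the ValidWriteIdList string
   * by itself identifies the snapshot.
   */
  public static Table readTable(RawStore store, String catName, String dbName,
      String tableName, String validWriteIdList) throws MetaException {
    // old: store.getTable(catName, dbName, tableName, txnId, validWriteIdList);
    return store.getTable(catName, dbName, tableName, validWriteIdList);
  }
}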


http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 270aa6c..1c105d1 100644
--- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -268,8 +268,8 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
 
   @Override
   public Table getTable(String catName, String dbName, String tableName,
-                        long txnId, String writeIdList) throws MetaException {
-    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
+                        String writeIdList) throws MetaException {
+    return objectStore.getTable(catName, dbName, tableName, writeIdList);
   }
 
   @Override
@@ -286,9 +286,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
 
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
-                                List<String> partVals, long txnId, String writeIdList)
+                                List<String> partVals, String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
+    return objectStore.getPartition(catName, dbName, tableName, partVals, writeIdList);
   }
 
   @Override
@@ -321,10 +321,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public void alterTable(String catName, String dbName, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
+  public void alterTable(String catName, String dbName, String name, Table newTable, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
+      objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
@@ -386,9 +386,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
 
   @Override
   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
-                             Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+                             Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+      objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
@@ -397,10 +397,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   @Override
   public void alterPartitions(String catName, String dbName, String tblName,
                               List<List<String>> partValsList, List<Partition> newParts,
-                              long writeId, long queryTxnId, String queryValidWriteIds)
+                              long writeId, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
     if (shouldEventSucceed) {
-      objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
+      objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds);
     } else {
       throw new RuntimeException("Event failed.");
     }
@@ -714,9 +714,9 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   @Override
   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
                                                    List<String> colNames,
-                                                   long txnId, String writeIdList)
+                                                   String writeIdList)
       throws MetaException, NoSuchObjectException {
-    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, txnId, writeIdList);
+    return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames, writeIdList);
   }
 
   @Override
@@ -736,16 +736,16 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, long txnId, String validWriteIds, long writeId)
+  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    return objectStore.updateTableColumnStatistics(statsObj, txnId, validWriteIds, writeId);
+    return objectStore.updateTableColumnStatistics(statsObj, validWriteIds, writeId);
   }
 
   @Override
   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
-      List<String> partVals, long txnId, String validWriteIds, long writeId)
+      List<String> partVals, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, txnId, validWriteIds, writeId);
+    return objectStore.updatePartitionColumnStatistics(statsObj, partVals, validWriteIds, writeId);
   }
 
   @Override
@@ -814,11 +814,10 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
                                                              String tblName, List<String> colNames,
                                                              List<String> partNames,
-                                                             long txnId,
                                                              String writeIdList)
       throws MetaException, NoSuchObjectException {
     return objectStore.getPartitionColumnStatistics(
-        catName, dbName, tblName  , colNames, partNames, txnId, writeIdList);
+        catName, dbName, tblName  , colNames, partNames, writeIdList);
   }
 
   @Override
@@ -901,7 +900,7 @@ public class DummyRawStoreFailEvent implements RawStore, Configurable {
   @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName,
                                       String tblName, List<String> partNames, List<String> colNames,
-                                      long txnId, String writeIdList)
+                                      String writeIdList)
       throws MetaException {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
index 7eddc16..1e279f3 100644
--- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java
@@ -79,8 +79,8 @@ public final class SynchronizedMetaStoreClient {
   }
 
   public synchronized void alter_partition(String dbName, String tblName,
-      Partition newPart, EnvironmentContext environmentContext, long txnId, String writeIdList) throws TException {
-    client.alter_partition(dbName, tblName, newPart, environmentContext, txnId, writeIdList);
+      Partition newPart, EnvironmentContext environmentContext, String writeIdList) throws TException {
+    client.alter_partition(dbName, tblName, newPart, environmentContext, writeIdList);
   }
 
   public synchronized LockResponse checkLock(long lockid) throws TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 9b66bcf..183d690 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1610,31 +1610,21 @@ public class AcidUtils {
   }
 
   public static class TableSnapshot {
-    private long txnId;
     private long writeId;
     private String validWriteIdList;
 
     public TableSnapshot() {
     }
 
-    public TableSnapshot(long txnId, long writeId, String validWriteIdList) {
-      this.txnId = txnId;
+    public TableSnapshot(long writeId, String validWriteIdList) {
       this.writeId = writeId;
       this.validWriteIdList = validWriteIdList;
     }
 
-    public long getTxnId() {
-      return txnId;
-    }
-
     public String getValidWriteIdList() {
       return validWriteIdList;
     }
 
-    public void setTxnId(long txnId) {
-      this.txnId = txnId;
-    }
-
     public long getWriteId() {
       return writeId;
     }
@@ -1649,7 +1639,7 @@ public class AcidUtils {
 
     @Override
     public String toString() {
-      return "[txnId=" + txnId + ", validWriteIdList=" + validWriteIdList + ", writeId=" + writeId + "]";
+      return "[validWriteIdList=" + validWriteIdList + ", writeId=" + writeId + "]";
     }
   }
 
@@ -1681,17 +1671,12 @@ public class AcidUtils {
     if (tblName == null) {
       tblName = tbl.getTableName();
     }
-    long txnId = -1;
     long writeId = -1;
     ValidWriteIdList validWriteIdList = null;
 
     HiveTxnManager sessionTxnMgr = SessionState.get().getTxnMgr();
-
-    if (sessionTxnMgr != null) {
-      txnId = sessionTxnMgr.getCurrentTxnId();
-    }
     String fullTableName = getFullTableName(dbName, tblName);
-    if (txnId > 0) {
+    if (sessionTxnMgr != null && sessionTxnMgr.getCurrentTxnId() > 0) {
       validWriteIdList = getTableValidWriteIdList(conf, fullTableName);
       if (isStatsUpdater) {
         writeId = SessionState.get().getTxnMgr() != null ?
@@ -1718,7 +1703,7 @@ public class AcidUtils {
         throw new AssertionError("Cannot find valid write ID list for " + tblName);
       }
     }
-    return new TableSnapshot(txnId, writeId,
+    return new TableSnapshot(writeId,
         validWriteIdList != null ? validWriteIdList.toString() : null);
   }
 

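The AcidUtils.TableSnapshot change above is the core data-structure edit: a snapshot is now described by (writeId, validWriteIdList) only, with the txnId field and its accessors removed. A short usage sketch under that assumption (illustrative only; the TableSnapshotSketch class and the literal values are placeholders, not from the patch):

import org.apache.hadoop.hive.ql.io.AcidUtils;

/** Illustrative only -- not part of this patch. */
public class TableSnapshotSketch {
  public static void main(String[] args) {
    // The write id and the write-id-list string below are placeholders.
    AcidUtils.TableSnapshot snapshot =
        new AcidUtils.TableSnapshot(42L, "default.stats2:3:9223372036854775807::");
    // Only writeId and validWriteIdList remain; getTxnId()/setTxnId() are gone.
    System.out.println(snapshot.getWriteId());          // 42
    System.out.println(snapshot.getValidWriteIdList()); // the placeholder string
    System.out.println(snapshot);                       // [validWriteIdList=..., writeId=42]
  }
}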
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index a9accbf..addbd6d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -652,7 +652,6 @@ public class Hive {
       // Why is alter_partitions synchronized while this isn't?
       getMSC().alter_table(
           catName, dbName, tblName, newTbl.getTTable(), environmentContext,
-          tableSnapshot == null ? -1 : tableSnapshot.getTxnId(),
           tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
     } catch (MetaException e) {
       throw new HiveException("Unable to alter table. " + e.getMessage(), e);
@@ -729,7 +728,6 @@ public class Hive {
       }
       getSynchronizedMSC().alter_partition(
           dbName, tblName, newPart.getTPartition(), environmentContext,
-          tableSnapshot == null ? -1 : tableSnapshot.getTxnId(),
           tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
 
     } catch (MetaException e) {
@@ -784,9 +782,8 @@ public class Hive {
         newTParts.add(tmpPart.getTPartition());
       }
       getMSC().alter_partitions(names[0], names[1], newTParts, environmentContext,
-          tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
           tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null,
-          tableSnapshot != null ? tableSnapshot.getWriteId() : -1    );
+          tableSnapshot != null ? tableSnapshot.getWriteId() : -1);
     } catch (MetaException e) {
       throw new HiveException("Unable to alter partition. " + e.getMessage(), e);
     } catch (TException e) {
@@ -829,19 +826,17 @@ public class Hive {
         }
       }
       String validWriteIds = null;
-      long txnId = -1;
       if (AcidUtils.isTransactionalTable(tbl)) {
         // Set table snapshot to api.Table to make it persistent.
         TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl, true);
         if (tableSnapshot != null) {
           newPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
-          txnId = tableSnapshot.getTxnId();
           validWriteIds = tableSnapshot.getValidWriteIdList();
         }
       }
 
       getMSC().renamePartition(tbl.getCatName(), tbl.getDbName(), tbl.getTableName(), pvals,
-          newPart.getTPartition(), txnId, validWriteIds);
+          newPart.getTPartition(), validWriteIds);
 
     } catch (InvalidOperationException e){
       throw new HiveException("Unable to rename partition. " + e.getMessage(), e);
@@ -1079,7 +1074,7 @@ public class Hive {
         getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames);
       } else {
         getMSC().truncateTable(table.getDbName(), table.getTableName(), partNames,
-            snapshot.getTxnId(), snapshot.getValidWriteIdList(), snapshot.getWriteId());
+            snapshot.getValidWriteIdList(), snapshot.getWriteId());
       }
     } catch (Exception e) {
       throw new HiveException(e);
@@ -1185,7 +1180,7 @@ public class Hive {
           validWriteIdList = AcidUtils.getTableValidWriteIdListWithTxnList(conf,
               dbName, tableName);
         }
-        tTable = getMSC().getTable(dbName, tableName, txnId,
+        tTable = getMSC().getTable(getDefaultCatalog(conf), dbName, tableName,
             validWriteIdList != null ? validWriteIdList.toString() : null);
       } else {
         tTable = getMSC().getTable(dbName, tableName);
@@ -2092,7 +2087,6 @@ public class Hive {
     LOG.debug("Altering existing partition " + newTPart.getSpec());
     getSynchronizedMSC().alter_partition(
         tbl.getDbName(), tbl.getTableName(), newTPart.getTPartition(), new EnvironmentContext(),
-        tableSnapshot == null ? -1 : tableSnapshot.getTxnId(),
         tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
   }
 
@@ -2614,7 +2608,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
           out.add(new Partition(tbl, outPart));
         }
         getMSC().alter_partitions(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),
-            partsToAlter, new EnvironmentContext(), -1, null, -1);
+            partsToAlter, new EnvironmentContext(), null, -1);
 
         for ( org.apache.hadoop.hive.metastore.api.Partition outPart :
         getMSC().getPartitionsByNames(addPartitionDesc.getDbName(), addPartitionDesc.getTableName(),part_names)){
@@ -4594,7 +4588,6 @@ private void constructOneLBLocationMap(FileStatus fSta,
       Table tbl = getTable(statsDesc.getDbName(), statsDesc.getTableName());
 
       AcidUtils.TableSnapshot tableSnapshot  = AcidUtils.getTableSnapshot(conf, tbl, true);
-      request.setTxnId(tableSnapshot != null ? tableSnapshot.getTxnId() : 0);
       request.setValidWriteIdList(tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
       request.setWriteId(tableSnapshot != null ? tableSnapshot.getWriteId() : 0);
       return getMSC().setPartitionColumnStatistics(request);
@@ -4613,11 +4606,8 @@ private void constructOneLBLocationMap(FileStatus fSta,
       if (checkTransactional) {
         Table tbl = getTable(dbName, tableName);
         AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
-        if (tableSnapshot.getTxnId() > 0) {
-          retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
-              tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
-              tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
-        }
+        retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames,
+            tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
       } else {
         retv = getMSC().getTableColumnStatistics(dbName, tableName, colNames);
       }
@@ -4632,18 +4622,16 @@ private void constructOneLBLocationMap(FileStatus fSta,
       String dbName, String tableName, List<String> partNames, List<String> colNames,
       boolean checkTransactional)
       throws HiveException {
-    long txnId = -1;
     String writeIdList = null;
     try {
       if (checkTransactional) {
         Table tbl = getTable(dbName, tableName);
         AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
-        txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
         writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
       }
 
-      return getMSC().getPartitionColumnStatistics(dbName, tableName, partNames, colNames,
-            txnId, writeIdList);
+      return getMSC().getPartitionColumnStatistics(
+          dbName, tableName, partNames, colNames, writeIdList);
     } catch (Exception e) {
       LOG.debug(StringUtils.stringifyException(e));
       throw new HiveException(e);
@@ -4652,17 +4640,14 @@ private void constructOneLBLocationMap(FileStatus fSta,
 
   public AggrStats getAggrColStatsFor(String dbName, String tblName,
      List<String> colNames, List<String> partName, boolean checkTransactional) {
-    long txnId = -1;
     String writeIdList = null;
     try {
       if (checkTransactional) {
         Table tbl = getTable(dbName, tblName);
         AcidUtils.TableSnapshot tableSnapshot = AcidUtils.getTableSnapshot(conf, tbl);
-        txnId = tableSnapshot != null ? tableSnapshot.getTxnId() : -1;
         writeIdList = tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null;
       }
-      return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName,
-          txnId, writeIdList);
+      return getMSC().getAggrColStatsFor(dbName, tblName, colNames, partName, writeIdList);
     } catch (Exception e) {
       LOG.debug(StringUtils.stringifyException(e));
       return new AggrStats(new ArrayList<ColumnStatisticsObj>(),0);
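
The net effect of the Hive.java hunks above is that callers of the metastore client no longer pass a transaction ID; snapshot reads and metadata writes are keyed by the ValidWriteIdList string alone, plus an allocated write ID where one exists. A minimal sketch of the new call pattern (not part of the commit), assuming an already-connected IMetaStoreClient, the default "hive" catalog, and placeholder names (class WriteIdListCalls, table db.tbl, column "a"):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class WriteIdListCalls {
  // Reads and writes are tied to a snapshot via the valid write ID list only;
  // no txnId argument appears anywhere in the client calls.
  static List<ColumnStatisticsObj> snapshotCalls(
      IMetaStoreClient client, Partition updatedPart, long writeId) throws Exception {
    String validWriteIds = client.getValidWriteIds("db.tbl").toString();
    // Write path: metadata changes carry the caller's write ID plus the same list.
    client.alter_partitions("db", "tbl", Arrays.asList(updatedPart), null, validWriteIds, writeId);
    // Read path: table metadata and column statistics for the same snapshot.
    Table t = client.getTable("hive", "db", "tbl", validWriteIds);
    return client.getTableColumnStatistics(
        t.getDbName(), t.getTableName(), Arrays.asList("a"), validWriteIds);
  }
}

The same write-ID-list-only pattern is what the SessionHiveMetaStoreClient, StatsOptimizer, stats-processor, and test hunks below are adapting to.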

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
index f7c9009..3240f2d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java
@@ -159,14 +159,14 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
 
   @Override
   public void truncateTable(String dbName, String tableName,
-      List<String> partNames, long txnId, String validWriteIds, long writeId)
+      List<String> partNames, String validWriteIds, long writeId)
       throws TException {
     org.apache.hadoop.hive.metastore.api.Table table = getTempTable(dbName, tableName);
     if (table != null) {
       truncateTempTable(table);
       return;
     }
-    super.truncateTable(dbName, tableName, partNames, txnId, validWriteIds, writeId);
+    super.truncateTable(dbName, tableName, partNames, validWriteIds, writeId);
   }
 
   @Override
@@ -362,7 +362,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
   @Override
   public void alter_table(String catName, String dbName, String tbl_name,
       org.apache.hadoop.hive.metastore.api.Table new_tbl,
-      EnvironmentContext envContext, long txnId, String validWriteIds)
+      EnvironmentContext envContext, String validWriteIds)
       throws InvalidOperationException, MetaException, TException {
     org.apache.hadoop.hive.metastore.api.Table old_tbl = getTempTable(dbName, tbl_name);
     if (old_tbl != null) {
@@ -370,8 +370,7 @@ public class SessionHiveMetaStoreClient extends HiveMetaStoreClient implements I
       alterTempTable(dbName, tbl_name, old_tbl, new_tbl, null);
       return;
     }
-    super.alter_table(catName, dbName, tbl_name, new_tbl, envContext, txnId,
-        validWriteIds);
+    super.alter_table(catName, dbName, tbl_name, new_tbl, envContext, validWriteIds);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
index 6c922ee..b8d4375 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/StatsOptimizer.java
@@ -475,7 +475,6 @@ public class StatsOptimizer extends Transform {
                     hive.getMSC().getTableColumnStatistics(
                       tbl.getDbName(), tbl.getTableName(),
                       Lists.newArrayList(colName),
-                      tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
                       tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
                 if (stats.isEmpty()) {
                   Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
@@ -541,7 +540,6 @@ public class StatsOptimizer extends Transform {
                   hive.getMSC().getTableColumnStatistics(
                     tbl.getDbName(), tbl.getTableName(),
                     Lists.newArrayList(colName),
-                      tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
                       tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
               if (stats.isEmpty()) {
                 Logger.debug("No stats for " + tbl.getTableName() + " column " + colName);
@@ -685,7 +683,6 @@ public class StatsOptimizer extends Transform {
               ColumnStatisticsData statData =
                   hive.getMSC().getTableColumnStatistics(
                     tbl.getDbName(), tbl.getTableName(), Lists.newArrayList(colName),
-                      tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
                       tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null)
                     .get(0).getStatsData();
               String name = colDesc.getTypeString().toUpperCase();
@@ -923,7 +920,6 @@ public class StatsOptimizer extends Transform {
 
       Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
           tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
-          tableSnapshot != null ? tableSnapshot.getTxnId() : -1,
           tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
       if (result.size() != parts.size()) {
         Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index 39209b3..2e25ece 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -182,7 +182,6 @@ public class ColStatsProcessor implements IStatsProcessor {
     HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl)
         ? SessionState.get().getTxnMgr() : null;
     if (txnMgr != null) {
-      request.setTxnId(txnMgr.getCurrentTxnId());
       request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
           AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
       request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName()));

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
index 2e4ce11..f34cb61 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUpdaterThread.java
@@ -447,7 +447,7 @@ public class StatsUpdaterThread extends Thread implements MetaStoreThread {
     }
     // TODO: we should probably skip updating if writeId is from an active txn
     boolean isTxnValid = (writeIdString == null) || ObjectStore.isCurrentStatsValidForTheQuery(
-        conf, db, tbl, params, statsWriteId , 0, writeIdString, false);
+        conf, db, tbl, params, statsWriteId , writeIdString, false);
     return getExistingStatsToUpdate(existingStats, params, isTxnValid);
   }
 
@@ -472,7 +472,7 @@ public class StatsUpdaterThread extends Thread implements MetaStoreThread {
     }
     // TODO: we should probably skip updating if writeId is from an active txn
     if (writeIdString != null && !ObjectStore.isCurrentStatsValidForTheQuery(
-        conf, db, tbl, params, statsWriteId, 0, writeIdString, false)) {
+        conf, db, tbl, params, statsWriteId, writeIdString, false)) {
       return allCols;
     }
     List<String> colsToUpdate = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 236bb7a..8c33f6a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -277,7 +277,7 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     List<ColumnStatisticsObj> stats;
     validWriteIds = msClient.getValidWriteIds("default." + tableName).toString();
     stats = msClient.getTableColumnStatistics(
-        "default", tableName, Lists.newArrayList("a"), -1, validWriteIds);
+        "default", tableName, Lists.newArrayList("a"), validWriteIds);
     return stats;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
index 6c768c0..55131f3 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java
@@ -153,13 +153,13 @@ public class TestStatsUpdaterThread {
     List<String> cols = Lists.newArrayList("s");
     String dbName = ss.getCurrentDatabase(), tblName = "simple_stats", fqName = dbName + "." + tblName;
     ValidWriteIdList initialWriteIds = msClient.getValidWriteIds(fqName);
-    verifyStatsUpToDate(tblName, cols, msClient, 0, initialWriteIds.toString(), true);
+    verifyStatsUpToDate(tblName, cols, msClient, initialWriteIds.toString(), true);
     assertFalse(su.runOneIteration());
     drainWorkQueue(su, 0);
 
     executeQuery("insert overwrite table simple_stats values ('test2')");
     ValidWriteIdList nextWriteIds = msClient.getValidWriteIds(fqName);
-    verifyStatsUpToDate(tblName, cols, msClient, 0, nextWriteIds.toString(), true);
+    verifyStatsUpToDate(tblName, cols, msClient, nextWriteIds.toString(), true);
     assertFalse(su.runOneIteration());
     drainWorkQueue(su, 0);
     String currentWriteIds = msClient.getValidWriteIds(fqName).toString();
@@ -171,17 +171,17 @@ public class TestStatsUpdaterThread {
     Table tbl = msClient.getTable(dbName, tblName);
     tbl.setWriteId(badWriteId);
     msClient.alter_table(
-        null, dbName, tblName, tbl, new EnvironmentContext(), -1, initialWriteIds.toString());
+        null, dbName, tblName, tbl, new EnvironmentContext(), initialWriteIds.toString());
 
     // Stats should not be valid.
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, false);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
 
     // Analyze should not be able to set valid stats for a running txn.
     assertTrue(su.runOneIteration());
     drainWorkQueue(su);
 
     currentWriteIds = msClient.getValidWriteIds(fqName).toString();
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, false);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
 
     msClient.abortTxns(Lists.newArrayList(badTxnId));
 
@@ -191,7 +191,7 @@ public class TestStatsUpdaterThread {
 
     // Stats will now be valid.
     currentWriteIds = msClient.getValidWriteIds(fqName).toString();
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, true);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, true);
 
     // Verify that incorrect stats from a valid write ID are also handled.
     badTxnId = msClient.openTxn("moo");
@@ -199,17 +199,17 @@ public class TestStatsUpdaterThread {
     tbl = msClient.getTable(dbName, tblName);
     tbl.setWriteId(badWriteId);
     StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
-    msClient.alter_table(null, dbName, tblName, tbl, new EnvironmentContext(), -1, initialWriteIds.toString());
+    msClient.alter_table(null, dbName, tblName, tbl, new EnvironmentContext(), initialWriteIds.toString());
 
     // Stats should not be valid.
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, false);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
 
     // Analyze should not be able to set valid stats for a running txn.
     assertTrue(su.runOneIteration());
     drainWorkQueue(su);
 
     currentWriteIds = msClient.getValidWriteIds(fqName).toString();
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, false);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, false);
 
     msClient.commitTxn(badTxnId);
 
@@ -219,7 +219,7 @@ public class TestStatsUpdaterThread {
 
     // Stats will now be valid.
     currentWriteIds = msClient.getValidWriteIds(fqName).toString();
-    verifyStatsUpToDate(tblName, cols, msClient, 0, currentWriteIds, true);
+    verifyStatsUpToDate(tblName, cols, msClient, currentWriteIds, true);
 
     msClient.close();
   }
@@ -256,14 +256,14 @@ public class TestStatsUpdaterThread {
     String currentWriteIds = msClient.getValidWriteIds(fqName).toString();
     // To update write ID we need to specify the write ID list to validate concurrent writes.
     msClient.alter_partitions(dbName, tblName,
-        Lists.newArrayList(part1), null, -1, currentWriteIds, badWriteId);
+        Lists.newArrayList(part1), null, currentWriteIds, badWriteId);
     msClient.alter_partitions(dbName, tblName,
-        Lists.newArrayList(part2), null, -1, currentWriteIds, badWriteId);
+        Lists.newArrayList(part2), null, currentWriteIds, badWriteId);
 
     // We expect two partitions to be updated.
     Map<String, List<ColumnStatisticsObj>> stats = msClient.getPartitionColumnStatistics(
         dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"),
-        Lists.newArrayList("s"), 0, currentWriteIds);
+        Lists.newArrayList("s"), currentWriteIds);
     assertEquals(1, stats.size());
 
     assertTrue(su.runOneIteration());
@@ -271,14 +271,14 @@ public class TestStatsUpdaterThread {
     // Analyze treats stats like data (new write ID), so stats still should not be valid.
     stats = msClient.getPartitionColumnStatistics(
         dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"),
-        Lists.newArrayList("s"), 0, currentWriteIds);
+        Lists.newArrayList("s"), currentWriteIds);
     assertEquals(1, stats.size());
 
     // New reader.
     currentWriteIds = msClient.getValidWriteIds(fqName).toString();
     stats = msClient.getPartitionColumnStatistics(
         dbName, tblName, Lists.newArrayList("p=1", "p=2", "p=3"),
-        Lists.newArrayList("s"), 0, currentWriteIds);
+        Lists.newArrayList("s"), currentWriteIds);
     assertEquals(3, stats.size());
 
     msClient.close();
@@ -588,8 +588,8 @@ public class TestStatsUpdaterThread {
   }
 
   private void verifyStatsUpToDate(String tbl, List<String> cols, IMetaStoreClient msClient,
-      long txnId, String validWriteIds, boolean isUpToDate) throws Exception {
-    Table table = msClient.getTable(ss.getCurrentDatabase(), tbl, txnId, validWriteIds);
+      String validWriteIds, boolean isUpToDate) throws Exception {
+    Table table = msClient.getTable(ss.getCurrentCatalog(), ss.getCurrentDatabase(), tbl, validWriteIds);
     verifyStatsUpToDate(table.getParameters(), cols, isUpToDate);
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
index ec42631..469a9a8 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@ -44,8 +44,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4);
   private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -59,7 +58,6 @@ import org.slf4j.LoggerFactory;
   private boolean ifNotExists; // required
   private boolean needResult; // optional
   private String catName; // optional
-  private long txnId; // optional
   private String validWriteIdList; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -70,8 +68,7 @@ import org.slf4j.LoggerFactory;
     IF_NOT_EXISTS((short)4, "ifNotExists"),
     NEED_RESULT((short)5, "needResult"),
     CAT_NAME((short)6, "catName"),
-    TXN_ID((short)7, "txnId"),
-    VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
+    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -98,9 +95,7 @@ import org.slf4j.LoggerFactory;
           return NEED_RESULT;
         case 6: // CAT_NAME
           return CAT_NAME;
-        case 7: // TXN_ID
-          return TXN_ID;
-        case 8: // VALID_WRITE_ID_LIST
+        case 7: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -144,9 +139,8 @@ import org.slf4j.LoggerFactory;
   // isset id assignments
   private static final int __IFNOTEXISTS_ISSET_ID = 0;
   private static final int __NEEDRESULT_ISSET_ID = 1;
-  private static final int __TXNID_ISSET_ID = 2;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -163,8 +157,6 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -174,8 +166,6 @@ import org.slf4j.LoggerFactory;
   public AddPartitionsRequest() {
     this.needResult = true;
 
-    this.txnId = -1L;
-
   }
 
   public AddPartitionsRequest(
@@ -215,7 +205,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
-    this.txnId = other.txnId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
@@ -235,8 +224,6 @@ import org.slf4j.LoggerFactory;
     this.needResult = true;
 
     this.catName = null;
-    this.txnId = -1L;
-
     this.validWriteIdList = null;
   }
 
@@ -391,28 +378,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public String getValidWriteIdList() {
     return this.validWriteIdList;
   }
@@ -486,14 +451,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case VALID_WRITE_ID_LIST:
       if (value == null) {
         unsetValidWriteIdList();
@@ -525,9 +482,6 @@ import org.slf4j.LoggerFactory;
     case CAT_NAME:
       return getCatName();
 
-    case TXN_ID:
-      return getTxnId();
-
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
@@ -554,8 +508,6 @@ import org.slf4j.LoggerFactory;
       return isSetNeedResult();
     case CAT_NAME:
       return isSetCatName();
-    case TXN_ID:
-      return isSetTxnId();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
     }
@@ -629,15 +581,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
     boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
     if (this_present_validWriteIdList || that_present_validWriteIdList) {
@@ -684,11 +627,6 @@ import org.slf4j.LoggerFactory;
     if (present_catName)
       list.add(catName);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_validWriteIdList = true && (isSetValidWriteIdList());
     list.add(present_validWriteIdList);
     if (present_validWriteIdList)
@@ -765,16 +703,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
     if (lastComparison != 0) {
       return lastComparison;
@@ -848,12 +776,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetValidWriteIdList()) {
       if (!first) sb.append(", ");
       sb.append("validWriteIdList:");
@@ -984,15 +906,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 7: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 8: // VALID_WRITE_ID_LIST
+          case 7: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -1050,11 +964,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.validWriteIdList != null) {
         if (struct.isSetValidWriteIdList()) {
           oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
@@ -1096,22 +1005,16 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCatName()) {
         optionals.set(1);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(2);
-      }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(3);
+        optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 4);
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetNeedResult()) {
         oprot.writeBool(struct.needResult);
       }
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
@@ -1138,7 +1041,7 @@ import org.slf4j.LoggerFactory;
       struct.setPartsIsSet(true);
       struct.ifNotExists = iprot.readBool();
       struct.setIfNotExistsIsSet(true);
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.needResult = iprot.readBool();
         struct.setNeedResultIsSet(true);
@@ -1148,10 +1051,6 @@ import org.slf4j.LoggerFactory;
         struct.setCatNameIsSet(true);
       }
       if (incoming.get(2)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(3)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index 45832a4..4d4595a 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@ -43,9 +43,8 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)5);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)7);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)6);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -58,7 +57,6 @@ import org.slf4j.LoggerFactory;
   private String tableName; // required
   private List<Partition> partitions; // required
   private EnvironmentContext environmentContext; // optional
-  private long txnId; // optional
   private long writeId; // optional
   private String validWriteIdList; // optional
 
@@ -69,9 +67,8 @@ import org.slf4j.LoggerFactory;
     TABLE_NAME((short)3, "tableName"),
     PARTITIONS((short)4, "partitions"),
     ENVIRONMENT_CONTEXT((short)5, "environmentContext"),
-    TXN_ID((short)6, "txnId"),
-    WRITE_ID((short)7, "writeId"),
-    VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
+    WRITE_ID((short)6, "writeId"),
+    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -96,11 +93,9 @@ import org.slf4j.LoggerFactory;
           return PARTITIONS;
         case 5: // ENVIRONMENT_CONTEXT
           return ENVIRONMENT_CONTEXT;
-        case 6: // TXN_ID
-          return TXN_ID;
-        case 7: // WRITE_ID
+        case 6: // WRITE_ID
           return WRITE_ID;
-        case 8: // VALID_WRITE_ID_LIST
+        case 7: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -142,10 +137,9 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private static final int __WRITEID_ISSET_ID = 1;
+  private static final int __WRITEID_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.ENVIRONMENT_CONTEXT,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.ENVIRONMENT_CONTEXT,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -160,8 +154,6 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
     tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -171,8 +163,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public AlterPartitionsRequest() {
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
   }
@@ -212,7 +202,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetEnvironmentContext()) {
       this.environmentContext = new EnvironmentContext(other.environmentContext);
     }
-    this.txnId = other.txnId;
     this.writeId = other.writeId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
@@ -230,8 +219,6 @@ import org.slf4j.LoggerFactory;
     this.tableName = null;
     this.partitions = null;
     this.environmentContext = null;
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
     this.validWriteIdList = null;
@@ -367,28 +354,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public long getWriteId() {
     return this.writeId;
   }
@@ -476,14 +441,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case WRITE_ID:
       if (value == null) {
         unsetWriteId();
@@ -520,9 +477,6 @@ import org.slf4j.LoggerFactory;
     case ENVIRONMENT_CONTEXT:
       return getEnvironmentContext();
 
-    case TXN_ID:
-      return getTxnId();
-
     case WRITE_ID:
       return getWriteId();
 
@@ -550,8 +504,6 @@ import org.slf4j.LoggerFactory;
       return isSetPartitions();
     case ENVIRONMENT_CONTEXT:
       return isSetEnvironmentContext();
-    case TXN_ID:
-      return isSetTxnId();
     case WRITE_ID:
       return isSetWriteId();
     case VALID_WRITE_ID_LIST:
@@ -618,15 +570,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_writeId = true && this.isSetWriteId();
     boolean that_present_writeId = true && that.isSetWriteId();
     if (this_present_writeId || that_present_writeId) {
@@ -677,11 +620,6 @@ import org.slf4j.LoggerFactory;
     if (present_environmentContext)
       list.add(environmentContext);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_writeId = true && (isSetWriteId());
     list.add(present_writeId);
     if (present_writeId)
@@ -753,16 +691,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
     if (lastComparison != 0) {
       return lastComparison;
@@ -846,12 +774,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetWriteId()) {
       if (!first) sb.append(", ");
       sb.append("writeId:");
@@ -980,15 +902,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 6: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 7: // WRITE_ID
+          case 6: // WRITE_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.writeId = iprot.readI64();
               struct.setWriteIdIsSet(true);
@@ -996,7 +910,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 8: // VALID_WRITE_ID_LIST
+          case 7: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -1053,11 +967,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.isSetWriteId()) {
         oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
         oprot.writeI64(struct.writeId);
@@ -1103,25 +1012,19 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetEnvironmentContext()) {
         optionals.set(1);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(2);
-      }
       if (struct.isSetWriteId()) {
-        optionals.set(3);
+        optionals.set(2);
       }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(4);
+        optionals.set(3);
       }
-      oprot.writeBitSet(optionals, 5);
+      oprot.writeBitSet(optionals, 4);
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
       if (struct.isSetEnvironmentContext()) {
         struct.environmentContext.write(oprot);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetWriteId()) {
         oprot.writeI64(struct.writeId);
       }
@@ -1149,7 +1052,7 @@ import org.slf4j.LoggerFactory;
         }
       }
       struct.setPartitionsIsSet(true);
-      BitSet incoming = iprot.readBitSet(5);
+      BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         struct.catName = iprot.readString();
         struct.setCatNameIsSet(true);
@@ -1160,14 +1063,10 @@ import org.slf4j.LoggerFactory;
         struct.setEnvironmentContextIsSet(true);
       }
       if (incoming.get(2)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(3)) {
         struct.writeId = iprot.readI64();
         struct.setWriteIdIsSet(true);
       }
-      if (incoming.get(4)) {
+      if (incoming.get(3)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java
index 6a1efd8..df295c7 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterTableRequest.java
@@ -43,9 +43,8 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)4);
   private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)5);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)7);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)6);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -58,7 +57,6 @@ import org.slf4j.LoggerFactory;
   private String tableName; // required
   private Table table; // required
   private EnvironmentContext environmentContext; // optional
-  private long txnId; // optional
   private long writeId; // optional
   private String validWriteIdList; // optional
 
@@ -69,9 +67,8 @@ import org.slf4j.LoggerFactory;
     TABLE_NAME((short)3, "tableName"),
     TABLE((short)4, "table"),
     ENVIRONMENT_CONTEXT((short)5, "environmentContext"),
-    TXN_ID((short)6, "txnId"),
-    WRITE_ID((short)7, "writeId"),
-    VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
+    WRITE_ID((short)6, "writeId"),
+    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -96,11 +93,9 @@ import org.slf4j.LoggerFactory;
           return TABLE;
         case 5: // ENVIRONMENT_CONTEXT
           return ENVIRONMENT_CONTEXT;
-        case 6: // TXN_ID
-          return TXN_ID;
-        case 7: // WRITE_ID
+        case 6: // WRITE_ID
           return WRITE_ID;
-        case 8: // VALID_WRITE_ID_LIST
+        case 7: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -142,10 +137,9 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private static final int __WRITEID_ISSET_ID = 1;
+  private static final int __WRITEID_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.ENVIRONMENT_CONTEXT,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.ENVIRONMENT_CONTEXT,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -159,8 +153,6 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
     tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -170,8 +162,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public AlterTableRequest() {
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
   }
@@ -207,7 +197,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetEnvironmentContext()) {
       this.environmentContext = new EnvironmentContext(other.environmentContext);
     }
-    this.txnId = other.txnId;
     this.writeId = other.writeId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
@@ -225,8 +214,6 @@ import org.slf4j.LoggerFactory;
     this.tableName = null;
     this.table = null;
     this.environmentContext = null;
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
     this.validWriteIdList = null;
@@ -347,28 +334,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public long getWriteId() {
     return this.writeId;
   }
@@ -456,14 +421,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case WRITE_ID:
       if (value == null) {
         unsetWriteId();
@@ -500,9 +457,6 @@ import org.slf4j.LoggerFactory;
     case ENVIRONMENT_CONTEXT:
       return getEnvironmentContext();
 
-    case TXN_ID:
-      return getTxnId();
-
     case WRITE_ID:
       return getWriteId();
 
@@ -530,8 +484,6 @@ import org.slf4j.LoggerFactory;
       return isSetTable();
     case ENVIRONMENT_CONTEXT:
       return isSetEnvironmentContext();
-    case TXN_ID:
-      return isSetTxnId();
     case WRITE_ID:
       return isSetWriteId();
     case VALID_WRITE_ID_LIST:
@@ -598,15 +550,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_writeId = true && this.isSetWriteId();
     boolean that_present_writeId = true && that.isSetWriteId();
     if (this_present_writeId || that_present_writeId) {
@@ -657,11 +600,6 @@ import org.slf4j.LoggerFactory;
     if (present_environmentContext)
       list.add(environmentContext);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_writeId = true && (isSetWriteId());
     list.add(present_writeId);
     if (present_writeId)
@@ -733,16 +671,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
     if (lastComparison != 0) {
       return lastComparison;
@@ -826,12 +754,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetWriteId()) {
       if (!first) sb.append(", ");
       sb.append("writeId:");
@@ -953,15 +875,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 6: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 7: // WRITE_ID
+          case 6: // WRITE_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.writeId = iprot.readI64();
               struct.setWriteIdIsSet(true);
@@ -969,7 +883,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 8: // VALID_WRITE_ID_LIST
+          case 7: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -1019,11 +933,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.isSetWriteId()) {
         oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
         oprot.writeI64(struct.writeId);
@@ -1063,25 +972,19 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetEnvironmentContext()) {
         optionals.set(1);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(2);
-      }
       if (struct.isSetWriteId()) {
-        optionals.set(3);
+        optionals.set(2);
       }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(4);
+        optionals.set(3);
       }
-      oprot.writeBitSet(optionals, 5);
+      oprot.writeBitSet(optionals, 4);
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
       if (struct.isSetEnvironmentContext()) {
         struct.environmentContext.write(oprot);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetWriteId()) {
         oprot.writeI64(struct.writeId);
       }
@@ -1100,7 +1003,7 @@ import org.slf4j.LoggerFactory;
       struct.table = new Table();
       struct.table.read(iprot);
       struct.setTableIsSet(true);
-      BitSet incoming = iprot.readBitSet(5);
+      BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         struct.catName = iprot.readString();
         struct.setCatNameIsSet(true);
@@ -1111,14 +1014,10 @@ import org.slf4j.LoggerFactory;
         struct.setEnvironmentContextIsSet(true);
       }
       if (incoming.get(2)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(3)) {
         struct.writeId = iprot.readI64();
         struct.setWriteIdIsSet(true);
       }
-      if (incoming.get(4)) {
+      if (incoming.get(3)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
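
The tuple-scheme edits above are mechanical fallout of dropping txnId: each later optional field slides down one position in the BitSet that prefixes the optional values on the wire, and the total bit count shrinks from 5 to 4. A tiny standalone illustration of that packing pattern with plain java.util.BitSet (not the Thrift protocol itself; the field names follow the diff, the flag values are made up):

import java.util.BitSet;

public class OptionalBitPacking {
  public static void main(String[] args) {
    boolean hasCatName = true, hasEnvironmentContext = false,
            hasWriteId = true, hasValidWriteIdList = true;

    // Four optional slots now that txnId is gone (previously five).
    BitSet optionals = new BitSet(4);
    if (hasCatName)            optionals.set(0);
    if (hasEnvironmentContext) optionals.set(1);
    if (hasWriteId)            optionals.set(2); // was bit 3 while txnId held bit 2
    if (hasValidWriteIdList)   optionals.set(3); // was bit 4

    System.out.println(optionals); // prints {0, 2, 3}
  }
}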

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 821049e..2804952 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -42,7 +42,6 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
   private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
@@ -55,7 +54,6 @@ import org.slf4j.LoggerFactory;
   private String tblName; // required
   private ClientCapabilities capabilities; // optional
   private String catName; // optional
-  private long txnId; // optional
   private String validWriteIdList; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -64,7 +62,6 @@ import org.slf4j.LoggerFactory;
     TBL_NAME((short)2, "tblName"),
     CAPABILITIES((short)3, "capabilities"),
     CAT_NAME((short)4, "catName"),
-    TXN_ID((short)5, "txnId"),
     VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -88,8 +85,6 @@ import org.slf4j.LoggerFactory;
           return CAPABILITIES;
         case 4: // CAT_NAME
           return CAT_NAME;
-        case 5: // TXN_ID
-          return TXN_ID;
         case 6: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
@@ -132,9 +127,7 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -146,8 +139,6 @@ import org.slf4j.LoggerFactory;
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -155,8 +146,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public GetTableRequest() {
-    this.txnId = -1L;
-
   }
 
   public GetTableRequest(
@@ -172,7 +161,6 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public GetTableRequest(GetTableRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetDbName()) {
       this.dbName = other.dbName;
     }
@@ -185,7 +173,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
-    this.txnId = other.txnId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
@@ -201,8 +188,6 @@ import org.slf4j.LoggerFactory;
     this.tblName = null;
     this.capabilities = null;
     this.catName = null;
-    this.txnId = -1L;
-
     this.validWriteIdList = null;
   }
 
@@ -298,28 +283,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public String getValidWriteIdList() {
     return this.validWriteIdList;
   }
@@ -377,14 +340,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case VALID_WRITE_ID_LIST:
       if (value == null) {
         unsetValidWriteIdList();
@@ -410,9 +365,6 @@ import org.slf4j.LoggerFactory;
     case CAT_NAME:
       return getCatName();
 
-    case TXN_ID:
-      return getTxnId();
-
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
@@ -435,8 +387,6 @@ import org.slf4j.LoggerFactory;
       return isSetCapabilities();
     case CAT_NAME:
       return isSetCatName();
-    case TXN_ID:
-      return isSetTxnId();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
     }
@@ -492,15 +442,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
     boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
     if (this_present_validWriteIdList || that_present_validWriteIdList) {
@@ -537,11 +478,6 @@ import org.slf4j.LoggerFactory;
     if (present_catName)
       list.add(catName);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_validWriteIdList = true && (isSetValidWriteIdList());
     list.add(present_validWriteIdList);
     if (present_validWriteIdList)
@@ -598,16 +534,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
     if (lastComparison != 0) {
       return lastComparison;
@@ -673,12 +599,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetValidWriteIdList()) {
       if (!first) sb.append(", ");
       sb.append("validWriteIdList:");
@@ -719,8 +639,6 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -778,14 +696,6 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 5: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
           case 6: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
@@ -831,11 +741,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.validWriteIdList != null) {
         if (struct.isSetValidWriteIdList()) {
           oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
@@ -869,22 +774,16 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCatName()) {
         optionals.set(1);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(2);
-      }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(3);
+        optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 4);
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetCapabilities()) {
         struct.capabilities.write(oprot);
       }
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
@@ -897,7 +796,7 @@ import org.slf4j.LoggerFactory;
       struct.setDbNameIsSet(true);
       struct.tblName = iprot.readString();
       struct.setTblNameIsSet(true);
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.capabilities = new ClientCapabilities();
         struct.capabilities.read(iprot);
@@ -908,10 +807,6 @@ import org.slf4j.LoggerFactory;
         struct.setCatNameIsSet(true);
       }
       if (incoming.get(2)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(3)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
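
After this change GetTableRequest no longer carries a txnId; a caller that previously set one now passes only the table coordinates plus an optional validWriteIdList. A minimal sketch of building such a request, assuming the standard Thrift-generated constructor and setters visible in the diff above (the catalog name and write-id-list values are purely illustrative):

import org.apache.hadoop.hive.metastore.api.GetTableRequest;

public class GetTableRequestSketch {
  public static void main(String[] args) {
    // dbName and tblName remain the only required fields.
    GetTableRequest req = new GetTableRequest("sales_db", "orders");

    // Optional fields that survive the change; there is no setTxnId() anymore.
    req.setCatName("hive");                              // illustrative catalog name
    req.setValidWriteIdList("sales_db.orders:42:42::");  // illustrative ValidWriteIdList string

    System.out.println(req);
  }
}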


[04/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index 0000000,fb4a761..fe64a91
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@@ -1,0 -1,471 +1,472 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ 
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotSame;
+ import static org.junit.Assert.assertTrue;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import org.junit.experimental.categories.Category;
+ 
+ /**
+  * TestMetaStoreEventListener. Test case for
+  * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
+  * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
+  */
+ @Category(MetastoreUnitTest.class)
+ public class TestMetaStoreEventListener {
+   private Configuration conf;
+   private HiveMetaStoreClient msc;
+ 
+   private static final String dbName = "hive2038";
+   private static final String tblName = "tmptbl";
+   private static final String renamed = "tmptbl2";
+   private static final String metaConfKey = "metastore.partition.name.whitelist.pattern";
+   private static final String metaConfVal = "";
+ 
+   @Before
+   public void setUp() throws Exception {
+     System.setProperty("hive.metastore.event.listeners",
+         DummyListener.class.getName());
+     System.setProperty("hive.metastore.pre.event.listeners",
+         DummyPreListener.class.getName());
+ 
+     conf = MetastoreConf.newMetastoreConf();
+ 
+     MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+     MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ 
+     msc = new HiveMetaStoreClient(conf);
+ 
+     msc.dropDatabase(dbName, true, true, true);
+     DummyListener.notifyList.clear();
+     DummyPreListener.notifyList.clear();
+   }
+ 
+   private void validateCreateDb(Database expectedDb, Database actualDb) {
+     assertEquals(expectedDb.getName(), actualDb.getName());
+     assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
+   }
+ 
+   private void validateTable(Table expectedTable, Table actualTable) {
+     assertEquals(expectedTable.getTableName(), actualTable.getTableName());
+     assertEquals(expectedTable.getDbName(), actualTable.getDbName());
+     assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
+   }
+ 
+   private void validateCreateTable(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
+     assertEquals(expectedPartition, actualPartition);
+   }
+ 
+   private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
+     assertEquals(expectedTable, actualTable);
+   }
+ 
+   private void validatePartition(Partition expectedPartition, Partition actualPartition) {
+     assertEquals(expectedPartition.getValues(), actualPartition.getValues());
+     assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
+     assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
+   }
+ 
+   private void validateAlterPartition(Partition expectedOldPartition,
+       Partition expectedNewPartition, String actualOldPartitionDbName,
+       String actualOldPartitionTblName,List<String> actualOldPartitionValues,
+       Partition actualNewPartition) {
+     assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
+     assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
+     assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
+ 
+     validatePartition(expectedNewPartition, actualNewPartition);
+   }
+ 
+   private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
+       Table actualOldTable, Table actualNewTable) {
+     validateTable(expectedOldTable, actualOldTable);
+     validateTable(expectedNewTable, actualNewTable);
+   }
+ 
+   private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
+       Table actualOldTable, Table actualNewTable) {
+     validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
+ 
+     assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
+     assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
+   }
+ 
+   private void validateLoadPartitionDone(String expectedTableName,
+       Map<String,String> expectedPartitionName, String actualTableName,
+       Map<String,String> actualPartitionName) {
+     assertEquals(expectedPartitionName, actualPartitionName);
+     assertEquals(expectedTableName, actualTableName);
+   }
+ 
+   private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
+     while (expectedPartitions.hasNext()){
+       assertTrue(actualPartitions.hasNext());
+       validatePartition(expectedPartitions.next(), actualPartitions.next());
+     }
+     assertFalse(actualPartitions.hasNext());
+   }
+ 
+   private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateDropTable(Table expectedTable, Table actualTable) {
+     validateTable(expectedTable, actualTable);
+   }
+ 
+   private void validateDropDb(Database expectedDb, Database actualDb) {
+     assertEquals(expectedDb, actualDb);
+   }
+ 
+   @Test
+   public void testListener() throws Exception {
+     int listSize = 0;
+ 
+     List<ListenerEvent> notifyList = DummyListener.notifyList;
+     List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+     assertEquals(notifyList.size(), listSize);
+     assertEquals(preNotifyList.size(), listSize);
+ 
+     new DatabaseBuilder()
+         .setName(dbName)
+         .create(msc, conf);
+     listSize++;
+     PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     Database db = msc.getDatabase(dbName);
+     assertEquals(listSize, notifyList.size());
+     assertEquals(listSize + 1, preNotifyList.size());
+     validateCreateDb(db, preDbEvent.getDatabase());
+ 
+     CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+     Assert.assertTrue(dbEvent.getStatus());
+     validateCreateDb(db, dbEvent.getDatabase());
+ 
+     Table table = new TableBuilder()
+         .inDb(db)
+         .setTableName(tblName)
+         .addCol("a", "string")
+         .addPartCol("b", "string")
+         .create(msc, conf);
+     PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     listSize++;
+     Table tbl = msc.getTable(dbName, tblName);
+     validateCreateTable(tbl, preTblEvent.getTable());
+     assertEquals(notifyList.size(), listSize);
+ 
+     CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+     Assert.assertTrue(tblEvent.getStatus());
+     validateCreateTable(tbl, tblEvent.getTable());
+ 
+ 
+     new PartitionBuilder()
+         .inTable(table)
+         .addValue("2011")
+         .addToTable(msc, conf);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ 
+     AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+     Assert.assertTrue(partEvent.getStatus());
+     Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
+     Partition partAdded = partEvent.getPartitionIterator().next();
++    partAdded.setWriteId(part.getWriteId());
+     validateAddPartition(part, partAdded);
+     validateTableInAddPartition(tbl, partEvent.getTable());
+     validateAddPartition(part, prePartEvent.getPartitions().get(0));
+ 
+     // Test adding multiple partitions in a single partition-set, atomically.
+     int currentTime = (int)System.currentTimeMillis();
+     HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
+     table = hmsClient.getTable(dbName, "tmptbl");
+     Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
+                                         currentTime, table.getSd(), table.getParameters());
+     hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+     ++listSize;
+     AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+     assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
+     List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+     assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+     assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+     assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+     assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
+ 
+     part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
+     msc.alter_partition(dbName, tblName, part);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAlterPartitionEvent preAlterPartEvent =
+         (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     // The partition did not change,
+     // so the new partition should be similar to the original partition
+     Partition origP = msc.getPartition(dbName, tblName, "b=2011");
+ 
+     AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(alterPartEvent.getStatus());
+     validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
+         alterPartEvent.getOldPartition().getTableName(),
+         alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
+ 
+ 
+     validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
+         preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
+         preAlterPartEvent.getNewPartition());
+ 
+     List<String> part_vals = new ArrayList<>();
+     part_vals.add("c=2012");
+     int preEventListSize;
+     preEventListSize = preNotifyList.size() + 1;
+     Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
+ 
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     assertEquals(preNotifyList.size(), preEventListSize);
+ 
+     AddPartitionEvent appendPartEvent =
+         (AddPartitionEvent)(notifyList.get(listSize-1));
+     Partition partAppended = appendPartEvent.getPartitionIterator().next();
+     validateAddPartition(newPart, partAppended);
+ 
+     PreAddPartitionEvent preAppendPartEvent =
+         (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+     validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
+ 
+     Table renamedTable = new Table(table);
+     renamedTable.setTableName(renamed);
+     msc.alter_table(dbName, tblName, renamedTable);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+ 
+     renamedTable = msc.getTable(dbName, renamed);
+ 
+     AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+     Assert.assertTrue(alterTableE.getStatus());
+     validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+     validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
+         preAlterTableE.getNewTable());
+ 
+     //change the table name back
+     table = new Table(renamedTable);
+     table.setTableName(tblName);
+     msc.alter_table(dbName, renamed, table);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+ 
+     table = msc.getTable(dbName, tblName);
+     table.getSd().addToCols(new FieldSchema("c", "int", ""));
+     msc.alter_table(dbName, tblName, table);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+ 
+     Table altTable = msc.getTable(dbName, tblName);
+ 
+     alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+     Assert.assertTrue(alterTableE.getStatus());
+     validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+     validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
+         preAlterTableE.getNewTable());
+ 
+     Map<String,String> kvs = new HashMap<>(1);
+     kvs.put("b", "2011");
+     msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+ 
+     LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(partMarkEvent.getStatus());
+     validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
+         partMarkEvent.getPartitionName());
+ 
+     PreLoadPartitionDoneEvent prePartMarkEvent =
+         (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
+     validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
+         prePartMarkEvent.getPartitionName());
+ 
+     msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
+         .size() - 1);
+ 
+     DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
+     Assert.assertTrue(dropPart.getStatus());
+     validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
+     validateTableInDropPartition(tbl, dropPart.getTable());
+ 
+     validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
+     validateTableInDropPartition(tbl, preDropPart.getTable());
+ 
+     msc.dropTable(dbName, tblName);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
+     Assert.assertTrue(dropTbl.getStatus());
+     validateDropTable(tbl, dropTbl.getTable());
+     validateDropTable(tbl, preDropTbl.getTable());
+ 
+     msc.dropDatabase(dbName);
+     listSize++;
+     assertEquals(notifyList.size(), listSize);
+     PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
+ 
+     DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+     Assert.assertTrue(dropDB.getStatus());
+     validateDropDb(db, dropDB.getDatabase());
+     validateDropDb(db, preDropDB.getDatabase());
+ 
+     msc.setMetaConf("metastore.try.direct.sql", "false");
+     ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
+     assertEquals("metastore.try.direct.sql", event.getKey());
+     assertEquals("true", event.getOldValue());
+     assertEquals("false", event.getNewValue());
+   }
+ 
+   @Test
+   public void testMetaConfNotifyListenersClosingClient() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), metaConfVal);
+     assertEquals(event.getNewValue(), "[test pattern modified]");
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), "[test pattern modified]");
+     assertEquals(event.getNewValue(), metaConfVal);
+   }
+ 
+   @Test
+   public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+     HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+     nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), metaConfVal);
+     assertEquals(event.getNewValue(), "[test pattern modified]");
+     // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+     nonClosingClient.getTTransport().close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     assertEquals(event.getOldValue(), "[test pattern modified]");
+     assertEquals(event.getNewValue(), metaConfVal);
+   }
+ 
+   @Test
+   public void testMetaConfDuplicateNotification() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, metaConfVal);
+     int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+ 
+     int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+     // Setting the key to the same value should not trigger a configChange event during shutdown
+     assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+   }
+ 
+   @Test
+   public void testMetaConfSameHandler() throws Exception {
+     HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+     closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+     ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+     IHMSHandler beforeHandler = event.getIHMSHandler();
+     closingClient.close();
+ 
+     Thread.sleep(2 * 1000);
+     event = (ConfigChangeEvent) DummyListener.getLastEvent();
+     int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+     IHMSHandler afterHandler = event.getIHMSHandler();
+     // Meta-conf cleanup should trigger an event to listener
+     assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+     // Both the handlers should be same
+     assertEquals(beforeHandler, afterHandler);
+   }
+ }
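
The test above wires DummyListener and DummyPreListener into the metastore through the hive.metastore.event.listeners / hive.metastore.pre.event.listeners properties and then asserts on the events they captured. For orientation, a minimal sketch of a recording listener in the same spirit, assuming only the public MetaStoreEventListener base class and its onCreateTable hook (this is not the actual DummyListener implementation from the Hive test tree):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
import org.apache.hadoop.hive.metastore.events.ListenerEvent;

public class RecordingListener extends MetaStoreEventListener {
  // Captured events; a test can inspect this list after driving the metastore.
  public static final List<ListenerEvent> notifyList =
      Collections.synchronizedList(new ArrayList<>());

  public RecordingListener(Configuration config) {
    super(config);
  }

  @Override
  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
    notifyList.add(tableEvent);
  }
}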

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 0000000,833e2bd..995271a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@@ -1,0 -1,904 +1,904 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.base.Supplier;
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectPrivilegeBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectRefBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PrivilegeGrantInfoBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.junit.Assert;
+ import org.junit.Assume;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import javax.jdo.Query;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ import java.util.concurrent.BrokenBarrierException;
+ import java.util.concurrent.CyclicBarrier;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.TimeUnit;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestObjectStore {
+   private ObjectStore objectStore = null;
+   private Configuration conf;
+ 
+   private static final String DB1 = "testobjectstoredb1";
+   private static final String DB2 = "testobjectstoredb2";
+   private static final String TABLE1 = "testobjectstoretable1";
+   private static final String KEY1 = "testobjectstorekey1";
+   private static final String KEY2 = "testobjectstorekey2";
+   private static final String OWNER = "testobjectstoreowner";
+   private static final String USER1 = "testobjectstoreuser1";
+   private static final String ROLE1 = "testobjectstorerole1";
+   private static final String ROLE2 = "testobjectstorerole2";
+   private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName());
+ 
+   private static final class LongSupplier implements Supplier<Long> {
+     public long value = 0;
+ 
+     @Override
+     public Long get() {
+       return value;
+     }
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+ 
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     dropAllStoreObjects(objectStore);
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   @Test
+   public void catalogs() throws MetaException, NoSuchObjectException {
+     final String names[] = {"cat1", "cat2"};
+     final String locations[] = {"loc1", "loc2"};
+     final String descriptions[] = {"description 1", "description 2"};
+ 
+     for (int i = 0; i < names.length; i++) {
+       Catalog cat = new CatalogBuilder()
+           .setName(names[i])
+           .setLocation(locations[i])
+           .setDescription(descriptions[i])
+           .build();
+       objectStore.createCatalog(cat);
+     }
+ 
+     List<String> fetchedNames = objectStore.getCatalogs();
+     Assert.assertEquals(3, fetchedNames.size());
+     for (int i = 0; i < names.length - 1; i++) {
+       Assert.assertEquals(names[i], fetchedNames.get(i));
+       Catalog cat = objectStore.getCatalog(fetchedNames.get(i));
+       Assert.assertEquals(names[i], cat.getName());
+       Assert.assertEquals(descriptions[i], cat.getDescription());
+       Assert.assertEquals(locations[i], cat.getLocationUri());
+     }
+     Catalog cat = objectStore.getCatalog(fetchedNames.get(2));
+     Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName());
+     Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription());
+     // Location will vary by system.
+ 
+     for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]);
+     fetchedNames = objectStore.getCatalogs();
+     Assert.assertEquals(1, fetchedNames.size());
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void getNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.getCatalog("no_such_catalog");
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.dropCatalog("no_such_catalog");
+   }
+ 
+   // TODO test dropping non-empty catalog
+ 
+   /**
+    * Test database operations
+    */
+   @Test
+   public void testDatabaseOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException {
+     String catName = "tdo1_cat";
+     createTestCatalog(catName);
+     Database db1 = new Database(DB1, "description", "locationurl", null);
+     Database db2 = new Database(DB2, "description", "locationurl", null);
+     db1.setCatalogName(catName);
+     db2.setCatalogName(catName);
+     objectStore.createDatabase(db1);
+     objectStore.createDatabase(db2);
+ 
+     List<String> databases = objectStore.getAllDatabases(catName);
+     LOG.info("databases: " + databases);
+     Assert.assertEquals(2, databases.size());
+     Assert.assertEquals(DB1, databases.get(0));
+     Assert.assertEquals(DB2, databases.get(1));
+ 
+     objectStore.dropDatabase(catName, DB1);
+     databases = objectStore.getAllDatabases(catName);
+     Assert.assertEquals(1, databases.size());
+     Assert.assertEquals(DB2, databases.get(0));
+ 
+     objectStore.dropDatabase(catName, DB2);
+   }
+ 
+   /**
+    * Test table operations
+    */
+   @Test
+   public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException,
+       InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd1 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     HashMap<String, String> params = new HashMap<>();
+     params.put("EXTERNAL", "false");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+ 
+     List<String> tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals(TABLE1, tables.get(0));
+ 
+     StorageDescriptor sd2 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null,
+         "MANAGED_TABLE");
+ 
+     // Change different fields and verify they were altered
+     newTbl1.setOwner("role1");
+     newTbl1.setOwnerType(PrincipalType.ROLE);
+ 
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null);
+     tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*");
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals("new" + TABLE1, tables.get(0));
+ 
+     // Verify fields were altered during the alterTable operation
+     Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner());
+     Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType());
+ 
+     objectStore.createTable(tbl1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(2, tables.size());
+ 
+     List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
+         "pk_const_1", false, false, false);
+     pk.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPrimaryKeys(ImmutableList.of(pk));
+     SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col",
+         DB1, "new" + TABLE1, "fk_col", 1,
+         0, 0, "fk_const_1", "pk_const_1", false, false, false);
+     objectStore.addForeignKeys(ImmutableList.of(fk));
+ 
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(1, foreignKeys.size());
+ 
+     List<SQLForeignKey> fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     if (fks != null) {
+       for (SQLForeignKey fkcol : fks) {
+         objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(),
+             fkcol.getFk_name());
+       }
+     }
+     // Retrieve from FK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(0, tables.size());
+ 
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   private StorageDescriptor createFakeSd(String location) {
+     return new StorageDescriptor(null, location, null, null, false, 0,
+         new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+   }
+ 
+ 
+   /**
+    * Tests partition operations
+    */
+   @Test
+   public void testPartitionOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd = createFakeSd("location");
+     HashMap<String, String> tableParams = new HashMap<>();
+     tableParams.put("EXTERNAL", "false");
+     FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
+     FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2),
+             tableParams, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+     HashMap<String, String> partitionParams = new HashMap<>();
+     partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
+     List<String> value1 = Arrays.asList("US", "CA");
+     Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
+     part1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part1);
+     List<String> value2 = Arrays.asList("US", "MA");
+     Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
+     part2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part2);
+ 
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(2, partitions.size());
+     Assert.assertEquals(111, partitions.get(0).getCreateTime());
+     Assert.assertEquals(222, partitions.get(1).getCreateTime());
+ 
+     int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "");
+     Assert.assertEquals(partitions.size(), numPartitions);
+ 
+     numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"");
+     Assert.assertEquals(2, numPartitions);
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1);
+     partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(1, partitions.size());
+     Assert.assertEquals(222, partitions.get(0).getCreateTime());
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops in one session.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws SQLException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheInSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     createPartitionedTable(false, false);
+     // query the partitions with JDO
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO, checking the cache is not causing any problem
+     partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops across sessions.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws SQLException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheCrossSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     ObjectStore objectStore2 = new ObjectStore();
+     objectStore2.setConf(conf);
+ 
+     createPartitionedTable(false, false);
+     // query the partitions with JDO in the 1st session
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // query the partitions with JDO in the 2nd session
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10,
+         false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql in the 1st session
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO in the 2nd session, checking the cache is not causing any
+     // problem
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks if the directSQL partition drop removes all connected data from the RDBMS tables.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws SQLException
+    */
+   @Test
+   public void testDirectSQLDropParitionsCleanup() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, SQLException, InvalidInputException {
+ 
+     createPartitionedTable(true, true);
+ 
+     // Check that every table is in the expected state before the drop
+     checkBackendTableSize("PARTITIONS", 3);
+     checkBackendTableSize("PART_PRIVS", 3);
+     checkBackendTableSize("PART_COL_PRIVS", 3);
+     checkBackendTableSize("PART_COL_STATS", 3);
+     checkBackendTableSize("PARTITION_PARAMS", 3);
+     checkBackendTableSize("PARTITION_KEY_VALS", 3);
+     checkBackendTableSize("SD_PARAMS", 3);
+     checkBackendTableSize("BUCKETING_COLS", 3);
+     checkBackendTableSize("SKEWED_COL_NAMES", 3);
+     checkBackendTableSize("SDS", 4); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 3);
+     checkBackendTableSize("SERDE_PARAMS", 3);
+     checkBackendTableSize("SERDES", 4); // Table has a serde
+ 
+     // drop the partitions
+     Deadline.startTimer("dropPartitions");
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false);
+ 
+     // Check that every row connected to the partitions is dropped
+     checkBackendTableSize("PARTITIONS", 0);
+     checkBackendTableSize("PART_PRIVS", 0);
+     checkBackendTableSize("PART_COL_PRIVS", 0);
+     checkBackendTableSize("PART_COL_STATS", 0);
+     checkBackendTableSize("PARTITION_PARAMS", 0);
+     checkBackendTableSize("PARTITION_KEY_VALS", 0);
+     checkBackendTableSize("SD_PARAMS", 0);
+     checkBackendTableSize("BUCKETING_COLS", 0);
+     checkBackendTableSize("SKEWED_COL_NAMES", 0);
+     checkBackendTableSize("SDS", 1); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 0);
+     checkBackendTableSize("SERDE_PARAMS", 0);
+     checkBackendTableSize("SERDES", 1); // Table has a serde
+   }
+ 
+   /**
+    * Creates the DB1 database and the TABLE1 table with 3 partitions.
+    * @param withPrivileges Should we create privileges as well
+    * @param withStatistics Should we create statistics as well
+    * @throws MetaException
+    * @throws InvalidObjectException
+    */
+   private void createPartitionedTable(boolean withPrivileges, boolean withStatistics)
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+                        .setName(DB1)
+                        .setDescription("description")
+                        .setLocation("locationurl")
+                        .build(conf);
+     objectStore.createDatabase(db1);
+     Table tbl1 =
+         new TableBuilder()
+             .setDbName(DB1)
+             .setTableName(TABLE1)
+             .addCol("test_col1", "int")
+             .addCol("test_col2", "int")
+             .addPartCol("test_part_col", "int")
+             .addCol("test_bucket_col", "int", "test bucket col comment")
+             .addCol("test_skewed_col", "int", "test skewed col comment")
+             .addCol("test_sort_col", "int", "test sort col comment")
+             .build(conf);
+     objectStore.createTable(tbl1);
+ 
+     PrivilegeBag privilegeBag = new PrivilegeBag();
+     // Create partitions for the partitioned table
+     for(int i=0; i < 3; i++) {
+       Partition part = new PartitionBuilder()
+                            .inTable(tbl1)
+                            .addValue("a" + i)
+                            .addSerdeParam("serdeParam", "serdeParamValue")
+                            .addStorageDescriptorParam("sdParam", "sdParamValue")
+                            .addBucketCol("test_bucket_col")
+                            .addSkewedColName("test_skewed_col")
+                            .addSortCol("test_sort_col", 1)
+                            .build(conf);
+       objectStore.addPartition(part);
+ 
+       if (withPrivileges) {
+         HiveObjectRef partitionReference = new HiveObjectRefBuilder().buildPartitionReference(part);
+         HiveObjectRef partitionColumnReference = new HiveObjectRefBuilder()
+             .buildPartitionColumnReference(tbl1, "test_part_col", part.getValues());
+         PrivilegeGrantInfo privilegeGrantInfo = new PrivilegeGrantInfoBuilder()
+             .setPrivilege("a")
+             .build();
+         HiveObjectPrivilege partitionPriv = new HiveObjectPrivilegeBuilder()
+                                                 .setHiveObjectRef(partitionReference)
+                                                 .setPrincipleName("a")
+                                                 .setPrincipalType(PrincipalType.USER)
+                                                 .setGrantInfo(privilegeGrantInfo)
+                                                 .build();
+         privilegeBag.addToPrivileges(partitionPriv);
+         HiveObjectPrivilege partitionColPriv = new HiveObjectPrivilegeBuilder()
+                                                    .setHiveObjectRef(partitionColumnReference)
+                                                    .setPrincipleName("a")
+                                                    .setPrincipalType(PrincipalType.USER)
+                                                    .setGrantInfo(privilegeGrantInfo)
+                                                    .build();
+         privilegeBag.addToPrivileges(partitionColPriv);
+       }
+ 
+       if (withStatistics) {
+         ColumnStatistics stats = new ColumnStatistics();
+         ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+         desc.setCatName(tbl1.getCatName());
+         desc.setDbName(tbl1.getDbName());
+         desc.setTableName(tbl1.getTableName());
+         desc.setPartName("test_part_col=a" + i);
+         stats.setStatsDesc(desc);
+ 
+         List<ColumnStatisticsObj> statsObjList = new ArrayList<>(1);
+         stats.setStatsObj(statsObjList);
+ 
+         ColumnStatisticsData data = new ColumnStatisticsData();
+         BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+         boolStats.setNumTrues(0);
+         boolStats.setNumFalses(0);
+         boolStats.setNumNulls(0);
+         data.setBooleanStats(boolStats);
+ 
+         ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
+         statsObjList.add(partStats);
+ 
 -        objectStore.updatePartitionColumnStatistics(stats, part.getValues());
++        objectStore.updatePartitionColumnStatistics(stats, part.getValues(), -1, null, -1);
+       }
+     }
+     if (withPrivileges) {
+       objectStore.grantPrivileges(privilegeBag);
+     }
+   }
+ 
+   /**
+    * Checks that the given HMS backend table contains the expected number of rows. If it does
+    * not, an {@link AssertionError} is thrown.
+    * @param tableName The backend table in which we count the rows
+    * @param size The expected row count
+    * @throws SQLException If there is a problem connecting to / querying the backend DB
+    */
+   private void checkBackendTableSize(String tableName, int size) throws SQLException {
+     String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+     // Use try-with-resources so the JDBC connection, statement and result set are always closed
+     try (Connection conn = DriverManager.getConnection(connectionStr);
+          Statement stmt = conn.createStatement();
+          ResultSet rs = stmt.executeQuery("SELECT COUNT(1) FROM " + tableName)) {
+       rs.next();
+       Assert.assertEquals(tableName + " table should contain " + size + " rows", size,
+           rs.getLong(1));
+     }
+   }
+ 
+   /**
+    * Test master keys operation
+    */
+   @Test
+   public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
+     int id1 = objectStore.addMasterKey(KEY1);
+     int id2 = objectStore.addMasterKey(KEY2);
+ 
+     String[] keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals(KEY1, keys[0]);
+     Assert.assertEquals(KEY2, keys[1]);
+ 
+     objectStore.updateMasterKey(id1, "new" + KEY1);
+     objectStore.updateMasterKey(id2, "new" + KEY2);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals("new" + KEY1, keys[0]);
+     Assert.assertEquals("new" + KEY2, keys[1]);
+ 
+     objectStore.removeMasterKey(id1);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(1, keys.length);
+     Assert.assertEquals("new" + KEY2, keys[0]);
+ 
+     objectStore.removeMasterKey(id2);
+   }
+ 
+   /**
+    * Test role operation
+    */
+   @Test
+   public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.addRole(ROLE1, OWNER);
+     objectStore.addRole(ROLE2, OWNER);
+     List<String> roles = objectStore.listRoleNames();
+     Assert.assertEquals(2, roles.size());
+     Assert.assertEquals(ROLE2, roles.get(1));
+     Role role1 = objectStore.getRole(ROLE1);
+     Assert.assertEquals(OWNER, role1.getOwnerName());
+     objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
+     objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
+     objectStore.removeRole(ROLE1);
+   }
+ 
+   @Test
+   public void testDirectSqlErrorMetrics() throws Exception {
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
+     Metrics.initialize(conf);
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
+         "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
+             "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"
+     );
+ 
+     // call setUp() again so that we get an object store with the metrics initialized
+     setUp();
+     Counter directSqlErrors =
+         Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
+ 
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         return null;
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(0, directSqlErrors.getCount());
+ 
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         throw new RuntimeException();
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(1, directSqlErrors.getCount());
+   }
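+ 
+   // A rough sketch of the fallback flow the two GetDbHelper blocks above exercise
+   // (simplified illustration only, not the actual ObjectStore.GetHelper implementation):
+   //
+   //   try {
+   //     result = getSqlResult(ctx);          // direct SQL path
+   //   } catch (Exception e) {
+   //     directSqlErrors.inc();               // the DIRECTSQL_ERRORS counter asserted on above
+   //     result = getJdoResult(ctx);          // JDO fallback path
+   //   }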
+ 
+   private static void dropAllStoreObjects(RawStore store)
+       throws MetaException, InvalidObjectException, InvalidInputException {
+     try {
+       Deadline.registerIfNot(100000);
+       List<Function> functions = store.getAllFunctions(DEFAULT_CATALOG_NAME);
+       for (Function func : functions) {
+         store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName());
+       }
+       for (String catName : store.getCatalogs()) {
+         List<String> dbs = store.getAllDatabases(catName);
+         for (String db : dbs) {
+           List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+           for (String tbl : tbls) {
+             Deadline.startTimer("getPartition");
+             List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+             for (Partition part : parts) {
+               store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+             }
+             // Find any constraints and drop them
+             Set<String> constraints = new HashSet<>();
+             List<SQLPrimaryKey> pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl);
+             if (pk != null) {
+               for (SQLPrimaryKey pkcol : pk) {
+                 constraints.add(pkcol.getPk_name());
+               }
+             }
+             List<SQLForeignKey> fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl);
+             if (fks != null) {
+               for (SQLForeignKey fkcol : fks) {
+                 constraints.add(fkcol.getFk_name());
+               }
+             }
+             for (String constraint : constraints) {
+               store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint);
+             }
+             store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+           }
+           store.dropDatabase(catName, db);
+         }
+         store.dropCatalog(catName);
+       }
+       List<String> roles = store.listRoleNames();
+       for (String role : roles) {
+         store.removeRole(role);
+       }
+     } catch (NoSuchObjectException e) {
+     }
+   }
+ 
+   @Test
+   public void testQueryCloseOnError() throws Exception {
+     ObjectStore spy = Mockito.spy(objectStore);
+     spy.getAllDatabases(DEFAULT_CATALOG_NAME);
+     spy.getAllFunctions(DEFAULT_CATALOG_NAME);
+     spy.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     spy.getPartitionCount();
+     Mockito.verify(spy, Mockito.times(3))
+         .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
+   }
+ 
+   @Test
+   public void testRetryingExecutorSleep() throws Exception {
+     RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null);
+     Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
+   }
+ 
+   @Ignore // See comment in ObjectStore.getDataSourceProps
+   @Test
+   public void testNonConfDatanucleusValueSet() {
+     String key = "datanucleus.no.such.key";
+     String value = "test_value";
+     String key1 = "blabla.no.such.key";
+     String value1 = "another_value";
+     Assume.assumeTrue(System.getProperty(key) == null);
+     Configuration localConf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(localConf);
+     localConf.set(key, value);
+     localConf.set(key1, value1);
+     objectStore = new ObjectStore();
+     objectStore.setConf(localConf);
+     Assert.assertEquals(value, objectStore.getProp().getProperty(key));
+     Assert.assertNull(objectStore.getProp().getProperty(key1));
+   }
+ 
+   /**
+    * Test notification operations
+    */
+   // TODO MS-SPLIT uncomment once we move EventMessage over
+   @Test
+   public void testNotificationOps() throws InterruptedException, MetaException {
+     final int NO_EVENT_ID = 0;
+     final int FIRST_EVENT_ID = 1;
+     final int SECOND_EVENT_ID = 2;
+ 
+     NotificationEvent event =
+         new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+     NotificationEventResponse eventResponse;
+     CurrentNotificationEventId eventId;
+ 
+     // Verify that there are no notifications available yet
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
+ 
+     // Verify that objectStore fetches the latest notification event ID
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that getNextNotification() returns all events
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(2, eventResponse.getEventsSize());
+     Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+ 
+     // Verify that getNextNotification(last) returns events after a specified event
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+     Assert.assertEquals(1, eventResponse.getEventsSize());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+ 
+     // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+ 
+     // Verify that cleanNotificationEvents() cleans up all old notifications
+     Thread.sleep(1);
+     objectStore.cleanNotificationEvents(1);
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+   }
+ 
+   @Ignore(
+       "This test is here to allow testing with other databases like mysql / postgres etc\n"
+           + " with  user changes to the code. This cannot be run on apache derby because of\n"
+           + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+   )
+   @Test
+   public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException, MetaException {
+ 
+     final int NUM_THREADS = 10;
+     CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+         () -> LoggerFactory.getLogger("test")
+             .debug(NUM_THREADS + " threads going to add notification"));
+ 
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     /*
+        Below are the properties that need to be set based on which database this test is going
+        to be run against.
+      */
+ 
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+ //    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+ //        "jdbc:mysql://localhost:3306/metastore_db");
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+ //    conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
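+ //    With MetastoreConf (which this test otherwise uses), the equivalent settings would
+ //    presumably be along these lines (variable names assumed; verify against
+ //    MetastoreConf.ConfVars before use):
+ //    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+ //    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, "jdbc:mysql://localhost:3306/metastore_db");
+ //    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_USER_NAME, "");
+ //    MetastoreConf.setVar(conf, MetastoreConf.ConfVars.PWD, "");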
+ 
+     /*
+      We have to add this manually because in tests the DB is initialized via metastore directSQL
+      and we do not run the schema creation SQL that includes an insert into NOTIFICATION_SEQUENCE,
+      which can be locked. The entry in NOTIFICATION_SEQUENCE is created via the NOTIFICATION_EVENT
+      insertion instead.
+     */
+     objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+     objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
+ 
+     objectStore.addNotificationEvent(
+         new NotificationEvent(0, 0,
+             EventMessage.EventType.CREATE_DATABASE.toString(),
+             "CREATE DATABASE DB initial"));
+ 
+     ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+     for (int i = 0; i < NUM_THREADS; i++) {
+       final int n = i;
+ 
+       executorService.execute(
+           () -> {
+             ObjectStore store = new ObjectStore();
+             store.setConf(conf);
+ 
+             String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+             NotificationEvent dbEvent =
+                 new NotificationEvent(0, 0, eventType,
+                     "CREATE DATABASE DB" + n);
+             System.out.println("ADDING NOTIFICATION");
+ 
+             try {
+               cyclicBarrier.await();
+               store.addNotificationEvent(dbEvent);
+             } catch (InterruptedException | BrokenBarrierException | MetaException e) {
+               throw new RuntimeException(e);
+             }
+             System.out.println("FINISH NOTIFICATION");
+           });
+     }
+     executorService.shutdown();
+     Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+ 
+     // We have to set this up again because the underlying PMF keeps getting reinitialized and the
+     // original reference is closed.
+     ObjectStore store = new ObjectStore();
+     store.setConf(conf);
+ 
+     NotificationEventResponse eventResponse = store.getNextNotification(
+         new NotificationEventRequest());
+     Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+     long previousId = 0;
+     for (NotificationEvent event : eventResponse.getEvents()) {
+       Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+           previousId < event.getEventId());
+       Assert.assertTrue(previousId + 1 == event.getEventId());
+       previousId = event.getEventId();
+     }
+   }
+ 
+   private void createTestCatalog(String catName) throws MetaException {
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation("/tmp")
+         .build();
+     objectStore.createCatalog(cat);
+   }
+ }
+ 
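A side note on the directSQL drop-partition cache tests above: they verify that JDO reads stay
consistent after partition rows are removed behind DataNucleus's back. As a minimal,
self-contained illustration of the underlying mechanism (plain JDO API; the class and method names
below are made up for this sketch and this is not the ObjectStore code):

import javax.jdo.PersistenceManagerFactory;

public class DirectSqlCacheEvictionSketch {
  // After rows are deleted with hand-written SQL, the JDO level-2 (datastore) cache may still
  // hold the corresponding persistent objects; evicting it forces later JDO queries to re-read
  // the database, which is the behavior the tests above assert on.
  static void evictAfterDirectSql(PersistenceManagerFactory pmf) {
    // ... the direct SQL DELETEs against PARTITIONS, PART_PRIVS, etc. would run here ...
    pmf.getDataStoreCache().evictAll();
  }
}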

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 0000000,717c5ee..01a8f81
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@@ -1,0 -1,233 +1,233 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestOldSchema {
+   private ObjectStore store = null;
+   private Configuration conf;
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName());
+ 
+   public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
+     @Override
+     public String convertExprToFilter(byte[] expr) throws MetaException {
+       return null;
+     }
+ 
+     @Override
+     public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr,
+                                           String defaultPartitionName,
+                                           List<String> partitionNames) throws MetaException {
+       return false;
+     }
+ 
+     @Override
+     public FileMetadataExprType getMetadataType(String inputFormat) {
+       return null;
+     }
+ 
+     @Override
+     public SearchArgument createSarg(byte[] expr) {
+       return null;
+     }
+ 
+     @Override
+     public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+       return null;
+     }
+   }
+ 
+   private byte[][] bitVectors = new byte[2][];
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+ 
+     store = new ObjectStore();
+     store.setConf(conf);
+     dropAllStoreObjects(store);
+     HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf));
+ 
+     HyperLogLog hll = HyperLogLog.builder().build();
+     hll.addLong(1);
+     bitVectors[1] = hll.serialize();
+     hll = HyperLogLog.builder().build();
+     hll.addLong(2);
+     hll.addLong(3);
+     hll.addLong(3);
+     hll.addLong(4);
+     bitVectors[0] = hll.serialize();
+   }
+ 
+   @After
+   public void tearDown() {
+   }
+ 
+   /**
+    * Tests partition operations
+    */
+   @Ignore("HIVE-19509: Disable tests that are failing continuously")
+   @Test
+   public void testPartitionOps() throws Exception {
+     String dbName = "default";
+     String tableName = "snp";
+     Database db1 = new DatabaseBuilder()
+         .setName(dbName)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     store.createDatabase(db1);
+     long now = System.currentTimeMillis();
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema("col1", "long", "nocomment"));
+     SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+         serde, null, null, Collections.emptyMap());
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("ds", "string", ""));
+     Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
+         Collections.emptyMap(), null, null, null);
+     store.createTable(table);
+ 
+     Deadline.startTimer("getPartition");
+     for (int i = 0; i < 10; i++) {
+       List<String> partVal = new ArrayList<>();
+       partVal.add(String.valueOf(i));
+       StorageDescriptor psd = new StorageDescriptor(sd);
+       psd.setLocation("file:/tmp/default/hit/ds=" + i);
+       Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd,
+           Collections.emptyMap());
+       part.setCatName(DEFAULT_CATALOG_NAME);
+       store.addPartition(part);
+       ColumnStatistics cs = new ColumnStatistics();
+       ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+       desc.setLastAnalyzed(now);
+       desc.setPartName("ds=" + String.valueOf(i));
+       cs.setStatsDesc(desc);
+       ColumnStatisticsObj obj = new ColumnStatisticsObj();
+       obj.setColName("col1");
+       obj.setColType("bigint");
+       ColumnStatisticsData data = new ColumnStatisticsData();
+       LongColumnStatsData dcsd = new LongColumnStatsData();
+       dcsd.setHighValue(1000 + i);
+       dcsd.setLowValue(-1000 - i);
+       dcsd.setNumNulls(i);
+       dcsd.setNumDVs(10 * i + 1);
+       dcsd.setBitVectors(bitVectors[0]);
+       data.setLongStats(dcsd);
+       obj.setStatsData(data);
+       cs.addToStatsObj(obj);
 -      store.updatePartitionColumnStatistics(cs, partVal);
++      store.updatePartitionColumnStatistics(cs, partVal, -1, null, -1);
+ 
+     }
+ 
+     Checker statChecker = new Checker() {
+       @Override
+       public void checkStats(AggrStats aggrStats) throws Exception {
+         Assert.assertEquals(10, aggrStats.getPartsFound());
+         Assert.assertEquals(1, aggrStats.getColStatsSize());
+         ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
+         Assert.assertEquals("col1", cso.getColName());
+         Assert.assertEquals("bigint", cso.getColType());
+         LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
+         Assert.assertEquals(1009, lcsd.getHighValue(), 0.01);
+         Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01);
+         Assert.assertEquals(45, lcsd.getNumNulls());
+         Assert.assertEquals(91, lcsd.getNumDVs());
+       }
+     };
+     List<String> partNames = new ArrayList<>();
+     for (int i = 0; i < 10; i++) {
+       partNames.add("ds=" + i);
+     }
+     AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames,
+         Arrays.asList("col1"));
+     statChecker.checkStats(aggrStats);
+ 
+   }
+ 
+   private interface Checker {
+     void checkStats(AggrStats aggrStats) throws Exception;
+   }
+ 
+   private static void dropAllStoreObjects(RawStore store) throws MetaException,
+       InvalidObjectException, InvalidInputException {
+     try {
+       Deadline.registerIfNot(100000);
+       Deadline.startTimer("getPartition");
+       List<String> dbs = store.getAllDatabases(DEFAULT_CATALOG_NAME);
+       for (int i = 0; i < dbs.size(); i++) {
+         String db = dbs.get(i);
+         List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+         for (String tbl : tbls) {
+           List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+           for (Partition part : parts) {
+             store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+           }
+           store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+         }
+         store.dropDatabase(DEFAULT_CATALOG_NAME, db);
+       }
+     } catch (NoSuchObjectException e) {
+     }
+   }
+ 
+ }
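
For reference, the aggregate values asserted by the Checker above follow directly from the
per-partition stats generated in the loop: numNulls is the sum 0 + 1 + ... + 9 = 45, highValue and
lowValue are the extremes 1000 + 9 = 1009 and -1000 - 9 = -1009, and numDVs = 91 equals the largest
per-partition value (10 * 9 + 1), which appears to be what the aggregator falls back to when
bitvector fetching is disabled via STATS_FETCH_BITVECTOR.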


[36/50] [abbrv] hive git commit: HIVE-20029 : add parallel insert, analyze, iow tests (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-20029 : add parallel insert, analyze, iow tests (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f2d5ac22
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f2d5ac22
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f2d5ac22

Branch: refs/heads/master
Commit: f2d5ac221da8dc81292831db06a92453a5c60ff1
Parents: bdd3cec
Author: sergey <se...@apache.org>
Authored: Fri Jul 20 15:54:01 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Fri Jul 20 15:54:01 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/TestTxnCommands.java  | 189 +++++++++++++++++++
 1 file changed, 189 insertions(+)
----------------------------------------------------------------------
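
A note for readers skimming the diff below: the new tests release all worker threads at the same
instant using a pair of CountDownLatches (cdlIn/cdlOut in QueryRunnable and runParallelQueries).
A minimal standalone sketch of that handshake, with made-up names and a print statement in place
of the Hive Driver call:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class LatchHandshakeSketch {
  public static void main(String[] args) throws InterruptedException {
    final int workers = 4;
    final CountDownLatch ready = new CountDownLatch(workers); // each worker signals readiness
    final CountDownLatch go = new CountDownLatch(1);          // main thread releases all at once
    ExecutorService pool = Executors.newFixedThreadPool(workers);
    for (int i = 0; i < workers; i++) {
      final int id = i;
      pool.execute(() -> {
        ready.countDown();            // "I'm at the barrier"
        try {
          go.await();                 // wait for the simultaneous release
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          return;
        }
        System.out.println("worker " + id + " would run its query here");
      });
    }
    ready.await();   // all workers are parked at the barrier
    go.countDown();  // release them together to maximize the chance of overlapping execution
    pool.shutdown();
    pool.awaitTermination(10, TimeUnit.SECONDS);
  }
}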


http://git-wip-us.apache.org/repos/asf/hive/blob/f2d5ac22/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 8c33f6a..3d4cb83 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -27,6 +27,11 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.curator.shaded.com.google.common.collect.Lists;
@@ -41,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
@@ -60,6 +66,7 @@ import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
+import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.thrift.TException;
 import org.junit.Assert;
 import org.junit.Ignore;
@@ -77,6 +84,7 @@ import org.slf4j.LoggerFactory;
  * Mostly uses bucketed tables
  */
 public class TestTxnCommands extends TxnCommandsBaseForTests {
+
   static final private Logger LOG = LoggerFactory.getLogger(TestTxnCommands.class);
   private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") +
     File.separator + TestTxnCommands.class.getCanonicalName()
@@ -108,6 +116,7 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     Assert.assertEquals("1", rs.get(0));
     Assert.assertEquals("5", rs.get(1));
   }
+
   @Ignore("not needed but useful for testing")
   @Test
   public void testNonAcidInsert() throws Exception {
@@ -230,6 +239,186 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     msClient.close();
   }
 
+  private static final class QueryRunnable implements Runnable {
+    private final CountDownLatch cdlIn, cdlOut;
+    private final String query;
+    private final HiveConf hiveConf;
+
+    QueryRunnable(HiveConf hiveConf, String query, CountDownLatch cdlIn, CountDownLatch cdlOut) {
+      this.query = query;
+      this.cdlIn = cdlIn;
+      this.cdlOut = cdlOut;
+      this.hiveConf = new HiveConf(hiveConf);
+    }
+
+    @Override
+    public void run() {
+      SessionState ss = SessionState.start(hiveConf);
+      try {
+        ss.applyAuthorizationPolicy();
+      } catch (HiveException e) {
+        throw new RuntimeException(e);
+      }
+      QueryState qs = new QueryState.Builder().withHiveConf(hiveConf).nonIsolated().build();
+      Driver d = new Driver(qs, null);
+      try {
+        LOG.info("Ready to run the query: " + query);
+        syncThreadStart(cdlIn, cdlOut);
+        try {
+          CommandProcessorResponse cpr = d.run(query);
+          if(cpr.getResponseCode() != 0) {
+            throw new RuntimeException(query + " failed: " + cpr);
+          }
+          d.getResults(new ArrayList<String>());
+        } catch (Exception e) {
+          throw new RuntimeException(e);
+        }
+      } finally {
+        d.close();
+      }
+    }
+  }
+
+
+  private static void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
+    cdlIn.countDown();
+    try {
+      cdlOut.await();
+    } catch (InterruptedException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  @Test
+  public void testParallelInsertStats() throws Exception {
+    final int TASK_COUNT = 4;
+    String tableName = "mm_table";
+    List<ColumnStatisticsObj> stats;
+    IMetaStoreClient msClient = prepareParallelTest(tableName, 0);
+
+    String[] queries = new String[TASK_COUNT];
+    for (int i = 0; i < queries.length; ++i) {
+      queries[i] = String.format("insert into %s (a) values (" + i + ")", tableName);
+    }
+
+    runParallelQueries(queries);
+
+    // Verify stats are either invalid, or valid and correct.
+    stats = getTxnTableStats(msClient, tableName);
+    boolean hasStats = 0 != stats.size();
+    if (hasStats) {
+      verifyLongStats(TASK_COUNT, 0, TASK_COUNT - 1, stats);
+    }
+
+    runStatementOnDriver(String.format("insert into %s (a) values (" + TASK_COUNT + ")", tableName));
+    if (!hasStats) {
+      // Stats should still be invalid if they were invalid.
+      stats = getTxnTableStats(msClient, tableName);
+      Assert.assertEquals(0, stats.size());
+    }
+
+    // Stats should be valid after analyze.
+    runStatementOnDriver(String.format("analyze table %s compute statistics for columns", tableName));
+    verifyLongStats(TASK_COUNT + 1, 0, TASK_COUNT, getTxnTableStats(msClient, tableName));
+  }
+
+  private void verifyLongStats(int dvCount, int min, int max, List<ColumnStatisticsObj> stats) {
+    Assert.assertEquals(1, stats.size());
+    LongColumnStatsData data = stats.get(0).getStatsData().getLongStats();
+    Assert.assertEquals(min, data.getLowValue());
+    Assert.assertEquals(max, data.getHighValue());
+    Assert.assertEquals(dvCount, data.getNumDVs());
+  }
+
+  private void runParallelQueries(String[] queries)
+      throws InterruptedException, ExecutionException {
+    ExecutorService executor = Executors.newFixedThreadPool(queries.length);
+    final CountDownLatch cdlIn = new CountDownLatch(queries.length), cdlOut = new CountDownLatch(1);
+    Future<?>[] tasks = new Future[queries.length];
+    for (int i = 0; i < tasks.length; ++i) {
+      tasks[i] = executor.submit(new QueryRunnable(hiveConf, queries[i], cdlIn, cdlOut));
+    }
+    cdlIn.await(); // Wait for all threads to be ready.
+    cdlOut.countDown(); // Release them at the same time.
+    for (int i = 0; i < tasks.length; ++i) {
+      tasks[i].get();
+    }
+  }
+
+  private IMetaStoreClient prepareParallelTest(String tableName, int val)
+      throws Exception, MetaException, TException, NoSuchObjectException {
+    hiveConf.setBoolean("hive.stats.autogather", true);
+    hiveConf.setBoolean("hive.stats.column.autogather", true);
+    runStatementOnDriver("drop table if exists " + tableName);
+    runStatementOnDriver(String.format("create table %s (a int) stored as orc " +
+        "TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only')",
+        tableName));
+    runStatementOnDriver(String.format("insert into %s (a) values (" + val + ")", tableName));
+    runStatementOnDriver(String.format("insert into %s (a) values (" + val + ")", tableName));
+    IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf);
+    // Stats should be valid after serial inserts.
+    List<ColumnStatisticsObj> stats = getTxnTableStats(msClient, tableName);
+    Assert.assertEquals(1, stats.size());
+    return msClient;
+  }
+
+
+  @Test
+  public void testParallelInsertAnalyzeStats() throws Exception {
+    String tableName = "mm_table";
+    List<ColumnStatisticsObj> stats;
+    IMetaStoreClient msClient = prepareParallelTest(tableName, 0);
+
+    String[] queries = {
+        String.format("insert into %s (a) values (999)", tableName),
+        String.format("analyze table %s compute statistics for columns", tableName)
+    };
+    runParallelQueries(queries);
+
+    // Verify stats are either invalid, or valid and correct.
+    stats = getTxnTableStats(msClient, tableName);
+    boolean hasStats = 0 != stats.size();
+    if (hasStats) {
+      verifyLongStats(2, 0, 999, stats);
+    }
+
+    runStatementOnDriver(String.format("insert into %s (a) values (1000)", tableName));
+    if (!hasStats) {
+      // Stats should still be invalid if they were invalid.
+      stats = getTxnTableStats(msClient, tableName);
+      Assert.assertEquals(0, stats.size());
+    }
+
+    // Stats should be valid after analyze.
+    runStatementOnDriver(String.format("analyze table %s compute statistics for columns", tableName));
+    verifyLongStats(3, 0, 1000, getTxnTableStats(msClient, tableName));
+  }
+
+  // TODO## this test is broken; would probably be fixed by HIVE-20046
+  @Test
+  public void testParallelTruncateAnalyzeStats() throws Exception {
+    String tableName = "mm_table";
+    List<ColumnStatisticsObj> stats;
+    IMetaStoreClient msClient = prepareParallelTest(tableName, 0);
+
+    String[] queries = {
+        String.format("truncate table %s", tableName),
+        String.format("analyze table %s compute statistics for columns", tableName)
+    };
+    runParallelQueries(queries);
+
+    // Verify stats are either invalid, or valid and correct.
+    stats = getTxnTableStats(msClient, tableName);
+    boolean hasStats = 0 != stats.size();
+    if (hasStats) {
+      verifyLongStats(0, 0, 0, stats);
+    }
+
+    // Stats should be valid after analyze.
+    runStatementOnDriver(String.format("analyze table %s compute statistics for columns", tableName));
+    verifyLongStats(0, 0, 0, getTxnTableStats(msClient, tableName));
+  }
+
 
   @Test
   public void testTxnStatsOnOff() throws Exception {


[07/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 0000000,d91f737..bc04e06
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@@ -1,0 -1,3424 +1,3546 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
++import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this is marked as unstable.
+  * For users who require retry mechanism when the connection between metastore and client is
+  * broken, RetryingMetaStoreClient class should be used.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * implying the usage of some expanded features that require client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when the
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Test capability for tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI metastoreUris[];
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a copy of HiveConf so if Session conf changes, we may need to get a new HMS client.
+   protected boolean fastpath = false;
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connects
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientPreCatalog.class);
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // resolve the metastore URIs from the configuration
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     //If HADOOP_PROXY_USER is set in env or property,
+     //then need to create metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
+ 
+   private void resolveUris() throws MetaException {
+     String metastoreUrisString[] =  MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       int i = 0;
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = new URI[metastoreURIArray.size()];
+       for (int j = 0; j < metastoreURIArray.size(); j++) {
+         metastoreUris[j] = metastoreURIArray.get(j);
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         List uriList = Arrays.asList(metastoreUris);
+         Collections.shuffle(uriList);
+         metastoreUris = (URI[]) uriList.toArray();
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   //multiple clients may initialize the hook at the same time
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver" + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook" + e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a copy of currentMetaVars; there is a race condition in which
+     // currentMetaVars might be changed during the execution of this method.
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Mestastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
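+ 
+   // Illustrative sketch (not part of this patch; variable names are hypothetical): a caller
+   // holding a cached client can use isCompatibleWith to decide whether the client needs to be
+   // recreated after a configuration change:
+   //
+   //   IMetaStoreClient cached = ...;                 // previously created client
+   //   if (!cached.isCompatibleWith(newConf)) {
+   //     cached.close();
+   //     cached = new HiveMetaStoreClient(newConf);   // assumes the Configuration-based constructor
+   //   }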
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
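+ 
+   // Illustrative sketch (assumed configuration keys, not part of this patch): the RANDOM
+   // selection used by reconnect() only helps when several metastore URIs are configured, e.g.:
+   //
+   //   MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS,
+   //       "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083");
+   //   MetastoreConf.setVar(conf, ConfVars.THRIFT_URI_SELECTION, "RANDOM");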
+ 
+   /**
+    * @param dbname
+    * @param tbl_name
+    * @param new_tbl
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see
+    *   org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+    *   java.lang.String, java.lang.String,
+    *   org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext);
+   }
+ 
+   /**
+    * @param dbname
+    * @param name
+    * @param part_vals
+    * @param newPart
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+    *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.rename_partition(dbname, name, part_vals, newPart);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword );
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in unsecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() was not successful. " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find the client's UGI. set_ugi() was not successful. " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() was not successful. Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
+ 
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+   }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // The transport should already have been closed by client.shutdown(), so this is
+     // normally unnecessary, but we close it here just in case.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
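+ 
+   // Illustrative sketch (hypothetical variables, not part of this patch): callers that only
+   // need the partitions to exist can avoid materializing the result:
+   //
+   //   List<Partition> created = msc.add_partitions(parts, /* ifNotExists */ true,
+   //       /* needResults */ false);   // returns null when needResults is false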
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   /**
+    * @param table_name
+    * @param db_name
+    * @param part_vals
+    * @return the appended partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(db_name, table_name, part_vals, null);
+   }
+ 
+   public Partition appendPartition(String db_name, String table_name, List<String> part_vals,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_with_environment_context(db_name, table_name,
+         part_vals, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(dbName, tableName, partName, (EnvironmentContext)null);
+   }
+ 
+   public Partition appendPartition(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * Exchange a partition between two tables.
+    * @param partitionSpecs partition specs of the partition to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partition after the exchange
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
+ 
+   /**
+    * Exchange partitions between two tables.
+    * @param partitionSpecs partition specs of the partitions to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partitions after the exchange
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
+ 
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e){
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+       List<SQLUniqueConstraint> uniqueConstraints,
+       List<SQLNotNullConstraint> notNullConstraints,
+       List<SQLDefaultConstraint> defaultConstraints,
+       List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName) throws
+     NoSuchObjectException, MetaException, TException {
+     client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws MetaException,
+       NoSuchObjectException, TException {
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
+   /**
+    * @param type
+    * @return true or false
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     if (cascade) {
+       List<String> tableList = getAllTables(name);
+       for (String table : tableList) {
+         try {
+           // Subclasses can override this step (for example, for temporary tables)
+           dropTable(name, table, deleteData, true);
+         } catch (UnsupportedOperationException e) {
+           // Ignore Index tables, those will be dropped with parent tables
+         }
+       }
+     }
+     client.drop_database(name, deleteData, cascade);
+   }
+ 
+   /**
+    * @param tbl_name
+    * @param db_name
+    * @param part_vals
+    * @return true or false
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException,
+       TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, null);
+   }
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
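+ 
+   // Illustrative sketch (hypothetical call, not part of this patch): this "ifPurge" context is
+   // what the purgeData flag of PartitionDropOptions maps to, e.g.:
+   //
+   //   msc.dropPartition("db1", "tbl1", partVals,
+   //       PartitionDropOptions.instance().deleteData(true).purgeData(true));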
+ 
+   /*
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge)
+       throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartition(dbName, tableName, partName, deleteData,
+                          ifPurge? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+   */
+ 
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData,
+       EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @return true or false
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, deleteData, null);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(db_name, tbl_name, part_vals, options.deleteData,
+                          options.purgeData? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData,
+         envContext);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options)
+       throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
+   /**
+    * {@inheritDoc}
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to save the data in the trash.
+    * @param ifPurge completely purge the table (skipping trash) while removing
+    *                data from warehouse
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge)
+       throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+     // Build a new EnvironmentContext with ifPurge set.
+     EnvironmentContext envContext = null;
+     if (ifPurge) {
+       Map<String, String> warehouseOptions = new HashMap<>();
+       warehouseOptions.put("ifPurge", "TRUE");
+       envContext = new EnvironmentContext(warehouseOptions);
+     }
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+   }
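+ 
+   // Illustrative sketch (hypothetical names, not part of this patch): dropping a table while
+   // bypassing the trash entirely:
+   //
+   //   msc.dropTable("db1", "tbl1", /* deleteData */ true,
+   //       /* ignoreUnknownTab */ true, /* ifPurge */ true);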
+ 
+   /**
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name)
+       throws NoSuchObjectException, MetaException, TException {
+     dropTable(dbname, name, true, true, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param dbname
+    * @param name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success = true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
+ 
+   /**
+    * Truncate the given table, or a subset of its partitions.
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException
+    * @throws TException
+    *           Could not truncate table properly.
+    */
+   @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
+     client.truncate_table(dbName, tableName, partNames);
+   }
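+ 
+   // Illustrative sketch (hypothetical names, not part of this patch): a null partition list
+   // truncates the whole table, while a list of partition names truncates only those partitions:
+   //
+   //   msc.truncateTable("db1", "unpartitioned_tbl", null);
+   //   msc.truncateTable("db1", "partitioned_tbl", Arrays.asList("ds=2018-07-25"));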
+ 
+   /**
+    * Recursively recycles the files from the input path into the cmroot directory, either by copying or moving them.
+    *
+    * @param request Holds the path of the data files to be recycled to cmroot, plus an
+    *                isPurge flag; when the flag is set to true, the recycled files are not moved to the Trash.
+    * @return Response, which is currently empty.
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
+ 
+   /**
+    * @param type
+    * @return true if the type is dropped
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @return map of types
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       for (String key : fromClient.keySet()) {
+         result.put(key, deepCopy(fromClient.get(key)));
+       }
+     }
+     return result;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getDatabases(String databasePattern)
+     throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_databases(databasePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllDatabases() throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_all_databases());
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /**
+    * @param tbl_name
+    * @param db_name
+    * @param max_parts
+    * @return list of partitions
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
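+ 
+   // Illustrative sketch (hypothetical names, not part of this patch): by convention a negative
+   // max_parts value (e.g. -1) requests all partitions:
+   //
+   //   List<Partition> all = msc.listPartitions("db1", "tbl1", (short) -1);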
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(dbName, tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, short max_parts, String user_name, List<String> group_names)
+        throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts,
+         user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, List<String> part_vals, short max_parts,
+       String user_name, List<String> group_names) throws NoSuchObjectException,
+       MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(db_name,
+         tbl_name, part_vals, max_parts, user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, short max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(max_parts);
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     if (fastpath) {
+       result.addAll(r.getPartitions());
+     } else {
+       r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+       // TODO: in these methods, do we really need to deepcopy?
+       deepCopyPartitions(r.getPartitions(), result);
+     }
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
+ 
+   /**
+    * @param name
+    * @return the database
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
+    */
+   @Override
+   public Database getDatabase(String name) throws NoSuchObjectException,
+       MetaException, TException {
+     Database d = client.get_database(name);
+     return fastpath ? d : deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   /**
+    * @param tbl_name
+    * @param db_name
+    * @param part_vals
+    * @return the partition
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition getPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
+     Partition p = client.get_partition(db_name, tbl_name, part_vals);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_by_names(db_name, tbl_name, part_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     return client.get_partition_values(request);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws MetaException, UnknownTableException, NoSuchObjectException,
+       TException {
+     Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name,
+         group_names);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   /**
+    * @param name
+    * @param dbname
+    * @return the table
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public Table getTable(String dbname, String name) throws MetaException,
+       TException, NoSuchObjectException {
+     GetTableRequest req = new GetTableRequest(dbname, name);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return fastpath ? t : deepCopy(filterHook.filterTable(t));
+   }
+ 
++  @Override
++  public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return fastpath ? t : deepCopy(filterHook.filterTable(t));
++  }
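++
++  // Illustrative sketch (assumption, not part of this patch): a transactional reader that has
++  // already obtained a ValidWriteIdList for the table can pass its string form to get a
++  // snapshot-consistent view of the table metadata (names below are hypothetical):
++  //
++  //   String writeIds = validWriteIds.writeToString();   // assumed ValidWriteIdList helper
++  //   Table t = msc.getTable("db1", "tbl1", txnId, writeIds);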
++
+   /** {@inheritDoc} */
+   @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(cm, validTxnList);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(null, dbName, tableName, cm);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws MetaException, TException, InvalidOperationException, UnknownDBException {
+     return filterHook.filterTableNames(null, dbName,
+         client.get_table_names_by_filter(dbName, filter, maxTables));
+   }
+ 
+   /**
+    * @param name
+    * @return the type
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_tables(dbname, tablePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname,
+           client.get_tables_by_type(dbname, tablePattern, tableType.toString()));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_materialized_views_for_rewriting(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+     Map<String, TableMeta> sources = new LinkedHashMap<>();
+     Map<String, List<String>> dbTables = new LinkedHashMap<>();
+     for (TableMeta meta : metas) {
+       sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+       List<String> tables = dbTables.get(meta.getDbName());
+       if (tables == null) {
+         dbTables.put(meta.getDbName(), tables = new ArrayList<>());
+       }
+       tables.add(meta.getTableName());
+     }
+     List<TableMeta> filtered = new ArrayList<>();
+     for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+       for (String table : filterHook.filterTableNames(null, entry.getKey(), entry.getValue())) {
+         filtered.add(sources.get(entry.getKey() + "." + table));
+       }
+     }
+     return filtered;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_all_tables(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws MetaException,
+       TException, UnknownDBException {
+     try {
+       GetTableRequest req = new GetTableRequest(databaseName, tableName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return filterHook.filterPartitionNames(null, dbName, tblName,
+         client.get_partition_names(dbName, tblName, max));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException {
+     return filterHook.filterPartitionNames(null, db_name, tbl_name,
+         client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts));
+   }
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws MetaException,
+           NoSuchObjectException, TException {
+     return client.get_num_partitions_by_filter(db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, null);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
++    client.alter_partitions(dbName, tblName, newParts);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+   throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    client.alter_partitions_req(req);
++  }
++
++  @Override
++  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
++                               EnvironmentContext environmentContext,
++                               long txnId, String writeIdList, long writeId)
++      throws InvalidOperationException, MetaException, TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    client.alter_partitions_req(req);
+   }
+ 
+   @Override
+   public void alterDatabase(String dbName, Database db)
+       throws MetaException, NoSuchObjectException, TException {
+     client.alter_database(dbName, db);
+   }
+   /**
+    * @param db
+    * @param tableName
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getFields(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     List<FieldSchema> fields = client.get_fields(db, tableName);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_primary_keys(req).getPrimaryKeys();
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+     NoSuchObjectException, TException {
+     return client.get_foreign_keys(req).getForeignKeys();
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_unique_constraints(req).getUniqueConstraints();
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_not_null_constraints(req).getNotNullConstraints();
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest req)
+       throws MetaException, NoSuchObjectException, TException {
+     return client.get_default_constraints(req).getDefaultConstraints();
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest request) throws
+       MetaException, NoSuchObjectException, TException {
+     return client.get_check_constraints(request).getCheckConstraints();
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.update_table_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.update_partition_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException{
+     return client.set_aggr_stats_for(request);
+   }
+ 
+   @Override
+   public void flushCache() {
+     try {
+       client.flushCache();
+     } catch (TException e) {
+       // Not much we can do about it honestly
+       LOG.warn("Got error flushing the cache", e);
+     }
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+       List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+       InvalidInputException, InvalidObjectException {
+     return client.get_table_statistics_req(
+         new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
+   }
+ 
++  @Override
++  public List<ColumnStatisticsObj> getTableColumnStatistics(
++      String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
++    tsr.setTxnId(txnId);
++    tsr.setValidWriteIdList(validWriteIdList);
++
++    return client.get_table_statistics_req(tsr).getTableStats();
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+       String dbName, String tableName, List<String> partNames, List<String> colNames)
+           throws NoSuchObjectException, MetaException, TException {
+     return client.get_partitions_statistics_req(
+         new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
+   }
+ 
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
++    psr.setTxnId(txnId);
++    psr.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(
++        psr).getPartStats();
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+     String colName) throws NoSuchObjectException, InvalidObjectException, MetaException,
+     TException, InvalidInputException
+   {
+     return client.delete_partition_column_statistics(dbName, tableName, partName, colName);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException
+   {
+     return client.delete_table_column_statistics(dbName, tableName, colName);
+   }
+ 
+   /**
+    * @param db the database name
+    * @param tableName the table name
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getSchema(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     EnvironmentContext envCxt = null;
+     String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS);
+     if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+       Map<String, String> props = new HashMap<String, String>();
+       props.put("hive.added.jars.path", addedJars);
+       envCxt = new EnvironmentContext(props);
+     }
+ 
+     List<FieldSchema> fields = client.get_schema_with_environment_context(db, tableName, envCxt);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public String getConfigValue(String name, String defaultValue)
+       throws TException, ConfigValSecurityException {
+     return client.get_config_value(name, defaultValue);
+   }
+ 
+   @Override
+   public Partition getPartition(String db, String tableName, String partName)
+       throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+     Partition p = client.get_partition_by_name(db, tableName, partName);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+     return appendPartitionByName(dbName, tableName, partName, null);
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+       MetaException, TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData) throws NoSuchObjectException, MetaException, TException {
+     return dropPartitionByName(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   private HiveMetaHook getHook(Table tbl) throws MetaException {
+     if (hookLoader == null) {
+       return null;
+     }
+     return hookLoader.getHook(tbl);
+   }
+ 
+   @Override
+   public List<String> partitionNameToVals(String name) throws MetaException, TException {
+     return client.partition_name_to_vals(name);
+   }
+ 
+   @Override
+   public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException{
+     return client.partition_name_to_spec(name);
+   }
+ 
+   /**
+    * @param partition the partition to copy
+    * @return a deep copy of the given partition, or null if the input is null
+    */
+   private Partition deepCopy(Partition partition) {
+     Partition copy = null;
+     if (partition != null) {
+       copy = new Partition(partition);
+     }
+     return copy;
+   }
+ 
+   private Database deepCopy(Database database) {
+     Database copy = null;
+     if (database != null) {
+       copy = new Database(database);
+     }
+     return copy;
+   }
+ 
+   protected Table deepCopy(Table table) {
+     Table copy = null;
+     if (table != null) {
+       copy = new Table(table);
+     }
+     return copy;
+   }
+ 
+   private Type deepCopy(Type type) {
+     Type copy = null;
+     if (type != null) {
+       copy = new Type(type);
+     }
+     return copy;
+   }
+ 
+   private FieldSchema deepCopy(FieldSchema schema) {
+     FieldSchema copy = null;
+     if (schema != null) {
+       copy = new FieldSchema(schema);
+     }
+     return copy;
+   }
+ 
+   private Function deepCopy(Function func) {
+     Function copy = null;
+     if (func != null) {
+       copy = new Function(func);
+     }
+     return copy;
+   }
+ 
+   protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) {
+     PrincipalPrivilegeSet copy = null;
+     if (pps != null) {
+       copy = new PrincipalPrivilegeSet(pps);
+     }
+     return copy;
+   }
+ 
+   private List<Partition> deepCopyPartitions(List<Partition> partitions) {
+     return deepCopyPartitions(partitions, null);
+   }
+ 
+   private List<Partition> deepCopyPartitions(
+       Collection<Partition> src, List<Partition> dest) {
+     if (src == null) {
+       return dest;
+     }
+     if (dest == null) {
+       dest = new ArrayList<Partition>(src.size());
+     }
+     for (Partition part : src) {
+       dest.add(deepCopy(part));
+     }
+     return dest;
+   }
+ 
+   private List<Table> deepCopyTables(List<Table> tables) {
+     List<Table> copy = null;
+     if (tables != null) {
+       copy = new ArrayList<Table>();
+       for (Table tab : tables) {
+         copy.add(deepCopy(tab));
+       }
+     }
+     return copy;
+   }
+ 
+   protected List<FieldSchema> deepCopyFieldSchemas(List<FieldSchema> schemas) {
+     List<FieldSchema> copy = null;
+     if (schemas != null) {
+       copy = new ArrayList<FieldSchema>();
+       for (FieldSchema schema : schemas) {
+         copy.add(deepCopy(schema));
+       }
+     }
+     return copy;
+   }
+ 
+   @Override
+   public boolean grant_role(String roleName, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantor(grantor);
+     req.setGrantorType(grantorType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean create_role(Role role)
+       throws MetaException, TException {
+     return client.create_role(role);
+   }
+ 
+   @Override
+   public boolean drop_role(String roleName) throws MetaException, TException {
+     return client.drop_role(roleName);
+   }
+ 
+   @Override
+   public List<Role> list_roles(String principalName,
+       PrincipalType principalType) throws MetaException, TException {
+     return client.list_roles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() throws MetaException, TException {
+     return client.get_role_names();
+   }
+ 
+   @Override
+   public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req)
+       throws MetaException, TException {
+     return client.get_principals_in_role(req);
+   }
+ 
+   @Override
+   public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
+       GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException {
+     return client.get_role_grants_for_principal(getRolePrincReq);
+   }
+ 
+   @Override
+   public boolean grant_privileges(PrivilegeBag privileges)
+       throws MetaException, TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setPrivileges(privileges);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_role(String roleName, String userName,
+       PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException,
+       TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setPrivileges(privileges);
+     req.setRevokeGrantOption(grantOption);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer,
+       PrivilegeBag grantPrivileges) throws MetaException,
+       TException {
+     String defaultCat = getDefaultCatalog(conf);
+     objToRefresh.setCatName(defaultCat);
+ 
+     if (grantPrivileges.getPrivileges() != null) {
+       for (HiveObjectPrivilege priv : grantPrivileges.getPrivileges()) {
+         if (!priv.getHiveObject().isSetCatName()) {
+           priv.getHiveObject().setCatName(defaultCat);
+         }
+       }
+     }
+     GrantRevokePrivilegeRequest grantReq = new GrantRevokePrivilegeRequest();
+     grantReq.setRequestType(GrantRevokeType.GRANT);
+     grantReq.setPrivileges(grantPrivileges);
+ 
+     GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, authorizer, grantReq);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+       String userName, List<String> groupNames) throws MetaException,
+       TException {
+     return client.get_privilege_set(hiveObject, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> list_privileges(String principalName,
+       PrincipalType principalType, HiveObjectRef hiveObject)
+       throws MetaException, TException {
+     return client.list_privileges(principalName, principalType, hiveObject);
+   }
+ 
+   public String getDelegationToken(String renewerKerberosPrincipalName) throws
+   MetaException, TException, IOException {
+     // A convenience method that makes the current user the intended owner of
+     // the delegation token request.
+     String owner = SecurityUtils.getUser();
+     return getDelegationToken(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws
+   MetaException, TException {
+     // This is expected to be a no-op, so we will return null when we use local metastore.
+     if (localMetaStore) {
+       return null;
+     }
+     return client.get_delegation_token(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return 0;
+     }
+     return client.renew_delegation_token(tokenStrForm);
+ 
+   }
+ 
+   @Override
+   public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return;
+     }
+     client.cancel_delegation_token(tokenStrForm);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+      return client.add_token(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) throws TException {
+     return client.remove_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) throws TException {
+     return client.get_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() throws TException {
+     return client.get_all_token_identifiers();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException, TException {
+     return client.add_master_key(key);
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+       throws NoSuchObjectException, MetaException, TException {
+     client.update_master_key(seqNo, key);
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) throws TException {
+     return client.remove_master_key(keySeq);
+   }
+ 
+   @Override
+   public String[] getMasterKeys() throws TException {
+     List<String> keyList = client.get_master_keys();
+     return keyList.toArray(new String[keyList.size()]);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns() throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns(long currentTxn) throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn);
+   }
+ 
+   @Override
+   public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException {
+     GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null);
+     GetValidWriteIdsResponse validWriteIds = client.get_valid_write_ids(rqst);
+     return TxnUtils.createValidReaderWriteIdList(validWriteIds.getTblValidWriteIds().get(0));
+   }
+ 
+   @Override
+   public List<TableValidWriteIds> getValidWriteIds(List<String> tablesList, String validTxnList)
+           throws TException {
+     GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(tablesList, validTxnList);
+     return client.get_valid_write_ids(rqst).getTblValidWriteIds();
+   }
+ 
+   @Override
+   public long openTxn(String user) throws TException {
+     OpenTxnsResponse txns = openTxns(user, 1);
+     return txns.getTxn_ids().get(0);
+   }
+ 
+   @Override
+   public OpenTxnsResponse openTxns(String user, int numTxns) throws TException {
+     return openTxnsIntr(user, numTxns, null, null);
+   }
+ 
+   @Override
+   public List<Long> replOpenTxn(

<TRUNCATED>
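
The hunk above (truncated here) adds transaction-aware overloads of getTableColumnStatistics and getPartitionColumnStatistics that forward a txnId and a ValidWriteIdList string on the Thrift stats requests. Below is a rough caller-side sketch only, not part of this patch: it assumes the IMetaStoreClient interface declares the same overloads shown above, and the client variable plus the database/table/column/partition names are made up for illustration.

    import java.util.Arrays;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

    public class TxnStatsReadSketch {
      // Reads column statistics consistent with the caller's snapshot by passing
      // the open transaction id and its ValidWriteIdList string to the overloads
      // added in the diff above.
      static void readStats(IMetaStoreClient client, long txnId, String validWriteIdList)
          throws Exception {
        List<ColumnStatisticsObj> tblStats = client.getTableColumnStatistics(
            "default", "tbl", Arrays.asList("col1", "col2"), txnId, validWriteIdList);

        Map<String, List<ColumnStatisticsObj>> partStats = client.getPartitionColumnStatistics(
            "default", "tbl", Arrays.asList("part1=aaa"), Arrays.asList("col1"),
            txnId, validWriteIdList);

        System.out.println(tblStats.size() + " table stat objects, "
            + partStats.size() + " partitions with stats");
      }
    }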

[03/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 0000000,62ed380..e4854f9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@@ -1,0 -1,1075 +1,1075 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ThreadFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.HiveMetaStore;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ import jline.internal.Log;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreCheckinTest.class)
+ public class TestCachedStore {
+ 
+   private ObjectStore objectStore;
+   private CachedStore cachedStore;
+   private SharedCache sharedCache;
+   private Configuration conf;
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     // Disable memory estimation for this test class
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     cachedStore = new CachedStore();
+     cachedStore.setConfForTest(conf);
+     // Stop the CachedStore cache update service. We'll start it explicitly to control the test
+     CachedStore.stopCacheUpdateService(1);
+     sharedCache = new SharedCache();
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+ 
+     // Create the 'hive' catalog
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test CachedStore
+    *********************************************************************************************/
+ 
+   @Test
+   public void testDatabaseOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testDatabaseOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via CachedStore
+     final String dbName1 = "testDatabaseOps1";
+     Database db1 = createTestDb(dbName1, dbOwner);
+     cachedStore.createDatabase(db1);
+     db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+     Assert.assertEquals(db1, dbRead);
+ 
+     // Alter the db via CachedStore (can only alter owner or parameters)
+     dbOwner = "user2";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     cachedStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via ObjectStore
+     final String dbName2 = "testDatabaseOps2";
+     Database db2 = createTestDb(dbName2, dbOwner);
+     objectStore.createDatabase(db2);
+     db2 = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+ 
+     // Alter db "testDatabaseOps" via ObjectStore
+     dbOwner = "user1";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Drop db "testDatabaseOps1" via ObjectStore
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added db via CachedStore
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     Assert.assertEquals(db2, dbRead);
+ 
+     // Read the altered db via CachedStore (altered user from "user2" to "user1")
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Try to read the dropped db after cache update
+     try {
+       dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+       Assert.fail("The database: " + dbName1
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+ 
+     // Clean up
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testTableOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Add a new table via CachedStore
+     String tblName1 = "tbl1";
+     Table tbl1 = new Table(tbl);
+     tbl1.setTableName(tblName1);
+     cachedStore.createTable(tbl1);
+     tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // Read via object store
+     tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertEquals(tbl1, tblRead);
+ 
+     // Add a new table via ObjectStore
+     String tblName2 = "tbl2";
+     Table tbl2 = new Table(tbl);
+     tbl2.setTableName(tblName2);
+     objectStore.createTable(tbl2);
+     tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ 
+     // Alter table "tbl" via ObjectStore
+     tblOwner = "role1";
+     tbl.setOwner(tblOwner);
+     tbl.setOwnerType(PrincipalType.ROLE);
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, -1, null);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner());
+     Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, tbl.getOwnerType());
+ 
+     // Drop table "tbl1" via ObjectStore
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read "tbl2" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     Assert.assertEquals(tbl2, tblRead);
+ 
+     // Read the altered "tbl" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Try to read the dropped "tbl1" via CachedStore (should return null)
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertNull(tblRead);
+ 
+     // Should return "tbl" and "tbl2"
+     List<String> tblNames = cachedStore.getTables(DEFAULT_CATALOG_NAME, dbName, "*");
+     Assert.assertTrue(tblNames.contains(tblName));
+     Assert.assertTrue(!tblNames.contains(tblName1));
+     Assert.assertTrue(tblNames.contains(tblName2));
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testPartitionOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testPartitionOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     final String ptnColVal1 = "aaa";
+     Map<String, String> partParams = new HashMap<String, String>();
+     Partition ptn1 =
+         new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn1);
+     ptn1 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     final String ptnColVal2 = "bbb";
+     Partition ptn2 =
+         new Partition(Arrays.asList(ptnColVal2), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn2);
+     ptn2 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table, partition via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+     Partition ptn1Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     Assert.assertEquals(ptn1, ptn1Read);
+     Partition ptn2Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+     Assert.assertEquals(ptn2, ptn2Read);
+ 
+     // Add a new partition via ObjectStore
+     final String ptnColVal3 = "ccc";
+     Partition ptn3 =
+         new Partition(Arrays.asList(ptnColVal3), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn3.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn3);
+     ptn3 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+ 
+     // Alter an existing partition ("aaa") via ObjectStore
+     final String ptnColVal1Alt = "aaaAlt";
+     Partition ptn1Atl =
+         new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1Atl.setCatName(DEFAULT_CATALOG_NAME);
 -    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl);
++    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Atl, -1, null);
+     ptn1Atl = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+ 
+     // Drop an existing partition ("bbb") via ObjectStore
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // We update twice to accurately detect if cache is dirty or not
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added partition via CachedStore
+     Partition ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     Assert.assertEquals(ptn3, ptnRead);
+ 
+     // Read the altered partition via CachedStore
+     ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     Assert.assertEquals(ptn1Atl, ptnRead);
+ 
+     // Try to read the dropped partition via CachedStore
+     try {
+       ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+       Assert.fail("The partition: " + ptnColVal2
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+     // Clean up
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   //@Test
+   public void testTableColStatsOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableColStatsOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     final String tblName = "tbl";
+     final String tblOwner = "user1";
+     final FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     // Stats values for col1
+     long col1LowVal = 5;
+     long col1HighVal = 500;
+     long col1Nulls = 10;
+     long col1DV = 20;
+     final FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     // Stats values for col2
+     long col2MaxColLen = 100;
+     double col2AvgColLen = 45.5;
+     long col2Nulls = 5;
+     long col2DV = 40;
+     final FieldSchema col3 = new FieldSchema("col3", "boolean", "boolean column");
+     // Stats values for col3
+     long col3NumTrues = 100;
+     long col3NumFalses = 30;
+     long col3Nulls = 10;
+     final List<FieldSchema> cols = new ArrayList<>();
+     cols.add(col1);
+     cols.add(col2);
+     cols.add(col3);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Add ColumnStatistics for tbl to metastore DB via ObjectStore
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     // Col1
+     ColumnStatisticsData data1 = new ColumnStatisticsData();
+     ColumnStatisticsObj col1Stats = new ColumnStatisticsObj(col1.getName(), col1.getType(), data1);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(col1LowVal);
+     longStats.setHighValue(col1HighVal);
+     longStats.setNumNulls(col1Nulls);
+     longStats.setNumDVs(col1DV);
+     data1.setLongStats(longStats);
+     colStatObjs.add(col1Stats);
+ 
+     // Col2
+     ColumnStatisticsData data2 = new ColumnStatisticsData();
+     ColumnStatisticsObj col2Stats = new ColumnStatisticsObj(col2.getName(), col2.getType(), data2);
+     StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
+     stringStats.setMaxColLen(col2MaxColLen);
+     stringStats.setAvgColLen(col2AvgColLen);
+     stringStats.setNumNulls(col2Nulls);
+     stringStats.setNumDVs(col2DV);
+     data2.setStringStats(stringStats);
+     colStatObjs.add(col2Stats);
+ 
+     // Col3
+     ColumnStatisticsData data3 = new ColumnStatisticsData();
+     ColumnStatisticsObj col3Stats = new ColumnStatisticsObj(col3.getName(), col3.getType(), data3);
+     BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+     boolStats.setNumTrues(col3NumTrues);
+     boolStats.setNumFalses(col3NumFalses);
+     boolStats.setNumNulls(col3Nulls);
+     data3.setBooleanStats(boolStats);
+     colStatObjs.add(col3Stats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     // Save to DB
 -    objectStore.updateTableColumnStatistics(stats);
++    objectStore.updateTableColumnStatistics(stats, -1, null, -1);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read table stats via CachedStore
+     ColumnStatistics newStats =
+         cachedStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName,
+             Arrays.asList(col1.getName(), col2.getName(), col3.getName()));
+     Assert.assertEquals(stats, newStats);
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test SharedCache
+    *********************************************************************************************/
+ 
+   @Test
+   public void testSharedStoreDb() {
+     Database db1 = createTestDb("db1", "user1");
+     Database db2 = createTestDb("db2", "user1");
+     Database db3 = createTestDb("db3", "user1");
+     Database newDb1 = createTestDb("newdb1", "user1");
+     sharedCache.addDatabaseToCache(db1);
+     sharedCache.addDatabaseToCache(db2);
+     sharedCache.addDatabaseToCache(db3);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2");
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2);
+     List<String> dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME);
+     Assert.assertEquals(dbs.size(), 2);
+     Assert.assertTrue(dbs.contains("newdb1"));
+     Assert.assertTrue(dbs.contains("db3"));
+   }
+ 
+   @Test
+   public void testSharedStoreTable() {
+     Table tbl1 = new Table();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     tbl1.setSd(sd1);
+     tbl1.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl2 = new Table();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     tbl2.setSd(sd2);
+     tbl2.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl3 = new Table();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     tbl3.setSd(sd3);
+     tbl3.setPartitionKeys(new ArrayList<>());
+ 
+     Table newTbl1 = new Table();
+     newTbl1.setDbName("db2");
+     newTbl1.setTableName("tbl1");
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(params1);
+     newSd1.setLocation("loc1");
+     newTbl1.setSd(newSd1);
+     newTbl1.setPartitionKeys(new ArrayList<>());
+ 
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1);
+ 
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 4);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     sharedCache.alterTableInCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", newTbl1);
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 3);
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl2");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 2);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+   }
+ 
+ 
+   @Test
+   public void testSharedStorePartition() {
+     String dbName = "db1";
+     String tbl1Name = "tbl1";
+     String tbl2Name = "tbl2";
+     String owner = "user1";
+     Database db = createTestDb(dbName, owner);
+     sharedCache.addDatabaseToCache(db);
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1);
+     Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2);
+ 
+     Partition part1 = new Partition();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     part1.setSd(sd1);
+     part1.setValues(Arrays.asList("201701"));
+ 
+     Partition part2 = new Partition();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     part2.setSd(sd2);
+     part2.setValues(Arrays.asList("201702"));
+ 
+     Partition part3 = new Partition();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     part3.setSd(sd3);
+     part3.setValues(Arrays.asList("201703"));
+ 
+     Partition newPart1 = new Partition();
+     newPart1.setDbName(dbName);
+     newPart1.setTableName(tbl1Name);
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(params1);
+     newSd1.setLocation("loc1new");
+     newPart1.setSd(newSd1);
+     newPart1.setValues(Arrays.asList("201701"));
+ 
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1);
+ 
+     Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     Assert.assertNull(t);
+ 
+     sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1);
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1new");
+   }
+ 
+   @Test
+   public void testAggrStatsRepeatedRead() throws Exception {
+     String dbName = "testTableColStatsOps";
+     String tblName = "tbl";
+     String colName = "f1";
+ 
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setLocation("some_location")
+         .build(conf);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+   }
+ 
+   @Test
+   public void testPartitionAggrStats() throws Exception {
+     String dbName = "testTableColStatsOps1";
+     String tblName = "tbl1";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+ 
+     longStats.setNumDVs(40);
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+   }
+ 
+   @Test
+   public void testPartitionAggrStatsBitVector() throws Exception {
+     String dbName = "testTableColStatsOps2";
+     String tblName = "tbl2";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+ 
+     HyperLogLog hll = HyperLogLog.builder().build();
+     hll.addLong(1);
+     hll.addLong(2);
+     hll.addLong(3);
+     longStats.setBitVectors(hll.serialize());
+ 
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1, -1, null, -1);
+ 
+     longStats.setNumDVs(40);
+     hll = HyperLogLog.builder().build();
+     hll.addLong(2);
+     hll.addLong(3);
+     hll.addLong(4);
+     hll.addLong(5);
+     longStats.setBitVectors(hll.serialize());
+ 
 -    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
++    cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2, -1, null, -1);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+   }
+ 
+   @Test
+   public void testMultiThreadedSharedCacheOps() throws Exception {
+     List<String> dbNames = new ArrayList<String>(Arrays.asList("db1", "db2", "db3", "db4", "db5"));
+     List<Callable<Object>> tasks = new ArrayList<Callable<Object>>();
+     ExecutorService executor = Executors.newFixedThreadPool(50, new ThreadFactory() {
+       @Override
+       public Thread newThread(Runnable r) {
+         Thread t = Executors.defaultThreadFactory().newThread(r);
+         t.setDaemon(true);
+         return t;
+       }
+     });
+ 
+     // Create 5 dbs
+     for (String dbName : dbNames) {
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Database db = createTestDb(dbName, "user1");
+           sharedCache.addDatabaseToCache(db);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String dbName : dbNames) {
+       Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
+       Assert.assertNotNull(db);
+       Assert.assertEquals(dbName, db.getName());
+     }
+ 
+     // Create 5 tables under "db1"
+     List<String> tblNames =
+         new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3", "tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+       FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+       List<FieldSchema> cols = new ArrayList<FieldSchema>();
+       cols.add(col1);
+       cols.add(col2);
+       FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+       List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+       ptnCols.add(ptnCol1);
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols);
+           sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       Assert.assertNotNull(tbl);
+       Assert.assertEquals(tblName, tbl.getTableName());
+     }
+ 
+     // Add 5 partitions to all tables
+     List<String> ptnVals = new ArrayList<String>(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : ptnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       for (String ptnVal : ptnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+ 
+     // Drop all partitions from "tbl1", "tbl2", "tbl3" and add 2 new partitions to "tbl4" and "tbl5"
+     List<String> newPtnVals = new ArrayList<String>(Arrays.asList("fff", "ggg"));
+     List<String> dropPtnTblNames = new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3"));
+     List<String> addPtnTblNames = new ArrayList<String>(Arrays.asList("tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : dropPtnTblNames) {
+       for (String ptnVal : ptnVals) {
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     for (String tblName : addPtnTblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : newPtnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : addPtnTblNames) {
+       for (String ptnVal : newPtnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+     for (String tblName : dropPtnTblNames) {
+       List<Partition> ptns = sharedCache.listCachedPartitions(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, 100);
+       Assert.assertEquals(0, ptns.size());
+     }
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   private Database createTestDb(String dbName, String dbOwner) {
+     String dbDescription = dbName;
+     String dbLocation = "file:/tmp";
+     Map<String, String> dbParams = new HashMap<>();
+     Database db = new Database(dbName, dbDescription, dbLocation, dbParams);
+     db.setOwnerName(dbOwner);
+     db.setOwnerType(PrincipalType.USER);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     return db;
+   }
+ 
+   private Table createTestTbl(String dbName, String tblName, String tblOwner,
+       List<FieldSchema> cols, List<FieldSchema> ptnCols) {
+     String serdeLocation = "file:/tmp";
+     Map<String, String> serdeParams = new HashMap<>();
+     Map<String, String> tblParams = new HashMap<>();
+     SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>());
+     StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0,
+         serdeInfo, null, null, serdeParams);
+     sd.setStoredAsSubDirectories(false);
+     Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null,
+         TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     return tbl;
+   }
+ 
+   // This method will return only after the cache has updated once
+   private void updateCache(CachedStore cachedStore) throws InterruptedException {
+     int maxTries = 100000;
+     long updateCountBefore = cachedStore.getCacheUpdateCount();
+     // Start the CachedStore update service
+     CachedStore.startCacheUpdateService(cachedStore.getConf(), true, false);
+     while ((cachedStore.getCacheUpdateCount() != (updateCountBefore + 1)) && (maxTries-- > 0)) {
+       Thread.sleep(1000);
+     }
+     CachedStore.stopCacheUpdateService(100);
+   }
+ }


[11/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 0000000,e58ee33..e985366
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@@ -1,0 -1,1175 +1,1177 @@@
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+    NEXT_VAL NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ -- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+ -- This table is required if datanucleus.autoStartMechanism=SchemaTable
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE NUCLEUS_TABLES
+ (
+    CLASS_NAME VARCHAR2(128) NOT NULL,
+    TABLE_NAME VARCHAR2(128) NOT NULL,
+    TYPE VARCHAR2(4) NOT NULL,
+    OWNER VARCHAR2(2) NOT NULL,
+    VERSION VARCHAR2(20) NOT NULL,
+    INTERFACE_NAME VARCHAR2(255) NULL
+ );
+ 
+ ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_COL_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table CDS.
+ CREATE TABLE CDS
+ (
+     CD_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     "COLUMN_NAME" VARCHAR2(767) NOT NULL,
+     TYPE_NAME CLOB NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID NUMBER NOT NULL,
+     PART_KEY_VAL VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID NUMBER NOT NULL,
+     "DESC" VARCHAR2(4000) NULL,
+     DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     CTLG_NAME VARCHAR2(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     SLIB VARCHAR2(4000) NULL,
+     "DESCRIPTION" VARCHAR2(4000),
+     "SERIALIZER_CLASS" VARCHAR2(4000),
+     "DESERIALIZER_CLASS" VARCHAR2(4000),
+     "SERDE_TYPE" NUMBER
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID NUMBER NOT NULL,
+     TYPE_NAME VARCHAR2(128) NULL,
+     TYPE1 VARCHAR2(767) NULL,
+     TYPE2 VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PKEY_COMMENT VARCHAR2(4000) NULL,
+     PKEY_NAME VARCHAR2(128) NOT NULL,
+     PKEY_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     ROLE_NAME VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     PART_NAME VARCHAR2(767) NULL,
+     SD_ID NUMBER NULL,
 -    TBL_ID NUMBER NULL
++    TBL_ID NUMBER NULL,
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_COL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+     INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+     INDEX_NAME VARCHAR2(128) NULL,
+     INDEX_TBL_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     ORIG_TBL_ID NUMBER NULL,
+     SD_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     BUCKET_COL_NAME VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     FIELD_NAME VARCHAR2(128) NOT NULL,
+     FIELD_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     USER_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID NUMBER NOT NULL,
+     CD_ID NUMBER NULL,
+     INPUT_FORMAT VARCHAR2(4000) NULL,
+     IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+     LOCATION VARCHAR2(4000) NULL,
+     NUM_BUCKETS NUMBER (10) NOT NULL,
+     OUTPUT_FORMAT VARCHAR2(4000) NULL,
+     SERDE_ID NUMBER NULL,
+     IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     "ORDER" NUMBER (10) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(180) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID NUMBER NOT NULL,
+     ADD_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     ROLE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     DB_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     OWNER VARCHAR2(767) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     RETENTION NUMBER (10) NOT NULL,
+     SD_ID NUMBER NULL,
+     TBL_NAME VARCHAR2(256) NULL,
+     TBL_TYPE VARCHAR2(128) NULL,
+     VIEW_EXPANDED_TEXT CLOB NULL,
+     VIEW_ORIGINAL_TEXT CLOB NULL,
 -    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
++    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL,
+     MATERIALIZATION_TIME NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ -- Table MV_TABLES_USED for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NULL,
+     DB_NAME VARCHAR2(128) NULL,
+     EVENT_TIME NUMBER NOT NULL,
+     EVENT_TYPE NUMBER (10) NOT NULL,
+     PARTITION_NAME VARCHAR2(767) NULL,
+     TBL_NAME VARCHAR2(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID NUMBER NOT NULL,
+     "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID NUMBER NOT NULL,
+     "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID NUMBER NOT NULL,
+     STRING_LIST_ID_KID NUMBER NOT NULL,
+     "LOCATION" VARCHAR2(4000) NULL
+ );
+ 
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID NUMBER (10) NOT NULL,
+     MASTER_KEY VARCHAR2(767) NULL
+ );
+ 
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT VARCHAR2(767) NOT NULL,
+     TOKEN VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID NUMBER NOT NULL,
+     STRING_LIST_ID_EID NUMBER NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- column statistics
+ 
+ CREATE TABLE TAB_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  TBL_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ CREATE TABLE VERSION (
+   VER_ID NUMBER NOT NULL,
+   SCHEMA_VERSION VARCHAR(127) NOT NULL,
+   VERSION_COMMENT VARCHAR(255)
+ );
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ CREATE TABLE PART_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  PARTITION_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  PART_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ CREATE TABLE FUNCS (
+   FUNC_ID NUMBER NOT NULL,
+   CLASS_NAME VARCHAR2(4000),
+   CREATE_TIME NUMBER(10) NOT NULL,
+   DB_ID NUMBER,
+   FUNC_NAME VARCHAR2(128),
+   FUNC_TYPE NUMBER(10) NOT NULL,
+   OWNER_NAME VARCHAR2(128),
+   OWNER_TYPE VARCHAR2(10)
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ CREATE TABLE FUNC_RU (
+   FUNC_ID NUMBER NOT NULL,
+   RESOURCE_TYPE NUMBER(10) NOT NULL,
+   RESOURCE_URI VARCHAR2(4000),
+   INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID NUMBER NOT NULL,
+     EVENT_ID NUMBER NOT NULL,
+     EVENT_TIME NUMBER(10) NOT NULL,
+     EVENT_TYPE VARCHAR2(32) NOT NULL,
+     CAT_NAME VARCHAR2(256),
+     DB_NAME VARCHAR2(128),
+     TBL_NAME VARCHAR2(256),
+     MESSAGE CLOB NULL,
+     MESSAGE_FORMAT VARCHAR(16) NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID NUMBER NOT NULL,
+     NEXT_EVENT_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID NUMBER,
+   CHILD_INTEGER_IDX NUMBER,
+   CHILD_TBL_ID NUMBER,
+   PARENT_CD_ID NUMBER,
+   PARENT_INTEGER_IDX NUMBER NOT NULL,
+   PARENT_TBL_ID NUMBER NOT NULL,
+   POSITION NUMBER NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE NUMBER NOT NULL,
+   UPDATE_RULE NUMBER,
+   DELETE_RULE NUMBER,
+   ENABLE_VALIDATE_RELY NUMBER NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ -- Table for METASTORE_DB_PROPERTIES and its constraints
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID NUMBER(19) PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED NUMBER(19) NOT NULL,
+   TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar2(128),
+   TXN_META_INFO varchar2(128),
+   TXN_HEARTBEAT_COUNT number(10),
+   TXN_TYPE number(10)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE VARCHAR2(128) NOT NULL,
+   TC_TABLE VARCHAR2(128),
+   TC_PARTITION VARCHAR2(767) NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID NUMBER(19)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID NUMBER(19) NOT NULL,
+   CTC_DATABASE VARCHAR2(128) NOT NULL,
+   CTC_TABLE VARCHAR2(256),
+   CTC_PARTITION VARCHAR2(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID NUMBER(19),
+   CTC_UPDATE_DELETE CHAR(1) NOT NULL
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+   HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+   HL_TXNID NUMBER(19) NOT NULL,
+   HL_DB VARCHAR2(128) NOT NULL,
+   HL_TABLE VARCHAR2(128),
+   HL_PARTITION VARCHAR2(767),
+   HL_LOCK_STATE CHAR(1) NOT NULL,
+   HL_LOCK_TYPE CHAR(1) NOT NULL,
+   HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   HL_ACQUIRED_AT NUMBER(19),
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT number(10),
+   HL_AGENT_INFO varchar2(128),
+   HL_BLOCKEDBY_EXT_ID number(19),
+   HL_BLOCKEDBY_INT_ID number(19),
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID NUMBER(19) PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START NUMBER(19),
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID NUMBER(19),
+   CQ_META_INFO BLOB,
+   CQ_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID NUMBER(19) PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START NUMBER(19),
+   CC_END NUMBER(19),
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID NUMBER(19),
+   CC_META_INFO BLOB,
+   CC_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar2(128) NOT NULL,
+   MT_KEY2 number(19) NOT NULL,
+   MT_COMMENT varchar2(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar2(128) NOT NULL,
+   WS_TABLE varchar2(128) NOT NULL,
+   WS_PARTITION varchar2(767),
+   WS_TXNID number(19) NOT NULL,
+   WS_COMMIT_ID number(19) NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
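-- Hypothetical illustration (not part of this schema file): NEXT_WRITE_ID appears
-- to hold the next write id to allocate per (database, table), while TXN_TO_WRITE_ID
-- records which write id a given transaction was assigned. The values below
-- (db1, tbl1, txn 42) are made up; a lookup of this shape would resolve the write id
-- granted to an open transaction:
SELECT T2W_WRITEID
  FROM TXN_TO_WRITE_ID
 WHERE T2W_DATABASE = 'db1' AND T2W_TABLE = 'tbl1' AND T2W_TXNID = 42;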
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID NUMBER(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE MATERIALIZATION_REBUILD_LOCKS (
+   MRL_TXN_ID NUMBER NOT NULL,
+   MRL_DB_NAME VARCHAR(128) NOT NULL,
+   MRL_TBL_NAME VARCHAR(256) NOT NULL,
+   MRL_LAST_HEARTBEAT NUMBER NOT NULL,
+   PRIMARY KEY(MRL_TXN_ID)
+ );
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID number(19) NOT NULL,
+   WNL_TXNID number(19) NOT NULL,
+   WNL_WRITEID number(19) NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME number(10) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
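-- Hypothetical sanity check (not part of this script): after a successful run the
-- VERSION table should contain exactly one row with SCHEMA_VERSION = '4.0.0'.
SELECT VER_ID, SCHEMA_VERSION, VERSION_COMMENT FROM VERSION;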

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index 0000000,71f5034..c9c6b30
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@@ -1,0 -1,342 +1,343 @@@
+ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
+ 
+ --@041-HIVE-16556.oracle.sql;
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ --@042-HIVE-16575.oracle.sql;
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ --@043-HIVE-16922.oracle.sql;
+ UPDATE SERDE_PARAMS
+ SET PARAM_KEY='collection.delim'
+ WHERE PARAM_KEY='colelction.delim';
+ 
+ --@044-HIVE-16997.oracle.sql;
+ ALTER TABLE PART_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ 
+ --@045-HIVE-16886.oracle.sql;
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ --@046-HIVE-17566.oracle.sql;
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ -- Upgrades for Schema Registry objects
+ ALTER TABLE "SERDES" ADD "DESCRIPTION" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "DESERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERDE_TYPE" INTEGER;
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ 
+ -- 048-HIVE-14498
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_TIMESTAMP timestamp NULL;
+ 
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_TIMESTAMP = CURRENT_TIMESTAMP;
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP DEFAULT CURRENT_TIMESTAMP);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP NOT NULL);
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ -- 049-HIVE-18489
+ UPDATE FUNC_RU
+   SET RESOURCE_URI = 's3a' || SUBSTR(RESOURCE_URI, 4)
+   WHERE RESOURCE_URI LIKE 's3n://%' ;
+ 
+ UPDATE SKEWED_COL_VALUE_LOC_MAP
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE SDS
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE DBS
+   SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4)
+   WHERE DB_LOCATION_URI LIKE 's3n://%' ;
+ 
+ -- HIVE-18192
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ ALTER TABLE COMPACTION_QUEUE RENAME COLUMN CQ_HIGHEST_TXN_ID TO CQ_HIGHEST_WRITE_ID;
+ 
+ ALTER TABLE COMPLETED_COMPACTIONS RENAME COLUMN CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID;
+ 
+ -- Modify txn_components/completed_txn_components tables to add write id.
+ ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID number(19);
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID number(19);
+ 
+ -- HIVE-18726
+ -- Add a new column to store the default value for DEFAULT constraints
+ ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
+ ALTER TABLE KEY_CONSTRAINTS MODIFY (PARENT_CD_ID NULL);
+ 
+ ALTER TABLE HIVE_LOCKS MODIFY(HL_TXNID NOT NULL);
+ 
+ -- HIVE-18755, add catalogs
+ -- new catalogs table
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Insert a default value.  The location is TBD.  Hive will fix this when it starts
+ INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+ 
+ -- Drop the unique index on DBS
+ DROP INDEX UNIQUE_DATABASE;
+ 
+ -- Add the new column to the DBS table; the NOT NULL constraint cannot be added until the column is populated
+ ALTER TABLE DBS ADD CTLG_NAME VARCHAR2(256);
+ 
+ -- Update all records in the DBS table to point to the Hive catalog
+ UPDATE DBS 
+   SET "CTLG_NAME" = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE DBS MODIFY CTLG_NAME NOT NULL;
+ 
+ -- Put back the unique index 
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ -- Add the foreign key
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- Add columns to table stats and part stats
+ ALTER TABLE TAB_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ ALTER TABLE PART_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ 
+ -- Set the catalog name on existing stats rows to 'hive'
+ UPDATE TAB_COL_STATS
+   SET CAT_NAME = 'hive';
+ UPDATE PART_COL_STATS
+   SET CAT_NAME = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE TAB_COL_STATS MODIFY CAT_NAME NOT NULL;
+ ALTER TABLE PART_COL_STATS MODIFY CAT_NAME NOT NULL;
+ 
+ -- Rebuild the index for partition column stats; there is no equivalent index for table stats
+ DROP INDEX PCS_STATS_IDX;
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Add column to partition events
+ ALTER TABLE PARTITION_EVENTS ADD CAT_NAME VARCHAR2(256);
+ UPDATE PARTITION_EVENTS
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ -- Add column to notification log
+ ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME VARCHAR2(256);
+ UPDATE NOTIFICATION_LOG
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) SELECT 'org.apache.hadoop.hive.metastore.model.MNotificationLog',1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- HIVE-18747
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID number(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID number(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ -- HIVE-18193
+ -- Populate NEXT_WRITE_ID for each transactional table, setting the next write ID to the next txn ID
+ INSERT INTO NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE, NWI_NEXT)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT NTXN_NEXT FROM NEXT_TXN_ID) NEXT_WRITE;
+ 
+ -- Populate TXN_TO_WRITE_ID for each aborted/open txn, setting the write ID equal to the txn ID
+ INSERT INTO TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID, T2W_WRITEID)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT TXN_ID, TXN_ID as WRITE_ID FROM TXNS) TXN_INFO;
+ 
+ -- Update TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS for write ID which is same as txn ID
+ UPDATE TXN_COMPONENTS SET TC_WRITEID = TC_TXNID;
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
+ 
+ ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
+ 
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
++
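
Two patterns in the script above are worth a closer look. The DBS.CTLG_NAME change uses the standard Oracle sequence for retrofitting a NOT NULL column onto a populated table (add it nullable, backfill, then tighten the constraint), and the HIVE-18489 updates rewrite the URI scheme by prepending 's3a' to SUBSTR(uri, 4), which drops the three-character 's3n' prefix. A minimal, self-contained sketch of both, against a hypothetical EXAMPLE_TBL rather than the real metastore tables:

  -- Hypothetical table and column names, for illustration only
  ALTER TABLE EXAMPLE_TBL ADD NEW_COL VARCHAR2(256);
  UPDATE EXAMPLE_TBL SET NEW_COL = 'hive';
  ALTER TABLE EXAMPLE_TBL MODIFY NEW_COL NOT NULL;

  -- SUBSTR(uri, 4) keeps everything from the 4th character on, i.e. '://bucket/path'
  SELECT 's3a' || SUBSTR('s3n://bucket/path', 4) AS REWRITTEN_URI FROM DUAL;
  -- result: 's3a://bucket/path'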

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 0000000,6fa5e2d..c94e6ec
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@@ -1,0 -1,6 +1,9 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ 
++ALTER TABLE TBLS ADD WRITE_ID number NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status from dual;
+ 
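
The two ALTER statements above add nullable WRITE_ID columns to TBLS and PARTITIONS. One way to confirm the upgrade took effect, shown here only as an illustrative check and not part of the shipped script, is to query Oracle's data dictionary:

  SELECT TABLE_NAME, COLUMN_NAME, NULLABLE
    FROM USER_TAB_COLUMNS
   WHERE TABLE_NAME IN ('TBLS', 'PARTITIONS')
     AND COLUMN_NAME = 'WRITE_ID';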


[45/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0723


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d2c60f3a
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d2c60f3a
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d2c60f3a

Branch: refs/heads/master
Commit: d2c60f3ae18fb18b1bc50355a7740cf352cab782
Parents: e8d7cdc 90d19ac
Author: sergey <se...@apache.org>
Authored: Mon Jul 23 11:22:33 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Mon Jul 23 11:22:33 2018 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hive/ql/log/PerfLogger.java   |    2 +
 .../scripts/upgrade/derby/upgrade.order.derby   |    2 +
 .../scripts/upgrade/mssql/upgrade.order.mssql   |    2 +
 .../scripts/upgrade/mysql/upgrade.order.mysql   |    2 +
 .../scripts/upgrade/oracle/upgrade.order.oracle |    2 +
 .../upgrade/postgres/upgrade.order.postgres     |    2 +
 .../apache/hadoop/hive/ql/exec/ExplainTask.java |   46 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   16 +-
 .../exec/spark/SparkDynamicPartitionPruner.java |   25 +-
 .../hive/ql/exec/spark/SparkPlanGenerator.java  |   24 +
 .../org/apache/hadoop/hive/ql/io/AcidUtils.java |  211 ++
 .../hadoop/hive/ql/io/HiveInputFormat.java      |   12 -
 .../hadoop/hive/ql/lockmgr/DbTxnManager.java    |  221 +--
 .../apache/hadoop/hive/ql/metadata/Table.java   |    4 +
 .../calcite/translator/RexNodeConverter.java    |    2 +-
 .../hive/ql/parse/ExplainConfiguration.java     |    8 +
 .../hive/ql/parse/ExplainSemanticAnalyzer.java  |    2 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |    1 +
 .../apache/hadoop/hive/ql/plan/ExplainWork.java |   17 +
 .../hive/ql/udf/generic/GenericUDFIn.java       |    2 +-
 .../hive/ql/udf/generic/GenericUDFUtils.java    |   57 +-
 ql/src/test/queries/clientpositive/bucket7.q    |    2 +
 .../test/queries/clientpositive/explain_locks.q |   22 +
 .../queries/clientpositive/orc_ppd_decimal.q    |   16 +-
 .../clientpositive/parquet_ppd_decimal.q        |   16 +-
 .../vectorization_parquet_ppd_decimal.q         |   16 +-
 .../results/clientpositive/explain_locks.q.out  |   91 +
 .../clientpositive/llap/orc_ppd_decimal.q.out   |   48 +-
 .../clientpositive/parquet_ppd_decimal.q.out    |   80 +-
 .../vectorization_parquet_ppd_decimal.q.out     |   80 +-
 .../server/ThreadWithGarbageCleanup.java        |    6 +-
 .../hadoop/hive/metastore/HiveMetaStore.java    |    8 +-
 .../hive/metastore/LockComponentBuilder.java    |    5 +
 .../hive/metastore/LockRequestBuilder.java      |   17 +
 .../hadoop/hive/metastore/ObjectStore.java      |    7 +-
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |   15 -
 .../main/sql/derby/hive-schema-3.2.0.derby.sql  |  720 +++++++
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    2 +-
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |   16 -
 .../sql/derby/upgrade-3.1.0-to-3.2.0.derby.sql  |   20 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |    8 -
 .../sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql  |    8 +
 .../src/main/sql/derby/upgrade.order.derby      |    3 +-
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   17 -
 .../main/sql/mssql/hive-schema-3.2.0.mssql.sql  | 1284 ++++++++++++
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |    2 +-
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |   17 -
 .../sql/mssql/upgrade-3.1.0-to-3.2.0.mssql.sql  |   23 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |   10 -
 .../sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql  |   10 +
 .../src/main/sql/mssql/upgrade.order.mssql      |    3 +-
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   16 -
 .../main/sql/mysql/hive-schema-3.2.0.mysql.sql  | 1218 ++++++++++++
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |    2 +-
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |   15 -
 .../sql/mysql/upgrade-3.1.0-to-3.2.0.mysql.sql  |   22 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |   10 -
 .../sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql  |   10 +
 .../src/main/sql/mysql/upgrade.order.mysql      |    3 +-
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   16 -
 .../sql/oracle/hive-schema-3.2.0.oracle.sql     | 1175 +++++++++++
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |    2 +-
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |   16 -
 .../oracle/upgrade-3.1.0-to-3.2.0.oracle.sql    |   22 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |    9 -
 .../oracle/upgrade-3.2.0-to-4.0.0.oracle.sql    |    9 +
 .../src/main/sql/oracle/upgrade.order.oracle    |    3 +-
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   16 -
 .../sql/postgres/hive-schema-3.2.0.postgres.sql | 1866 ++++++++++++++++++
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |    2 +-
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |   16 -
 .../upgrade-3.1.0-to-3.2.0.postgres.sql         |   22 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |   10 -
 .../upgrade-3.2.0-to-4.0.0.postgres.sql         |   10 +
 .../main/sql/postgres/upgrade.order.postgres    |    3 +-
 75 files changed, 7218 insertions(+), 507 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 183d690,a9983b0..7818efb
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@@ -32,7 -32,7 +32,8 @@@ import java.util.Properties
  import java.util.Set;
  import java.util.regex.Pattern;
  
 +import org.apache.avro.generic.GenericData;
+ import com.google.common.base.Preconditions;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FSDataOutputStream;
@@@ -40,23 -40,31 +41,31 @@@ import org.apache.hadoop.fs.FileStatus
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.PathFilter;
 -import org.apache.hadoop.hive.common.HiveStatsUtils;
 -import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
 -import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 -import org.apache.hadoop.hive.common.ValidWriteIdList;
 +import org.apache.hadoop.hive.common.*;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.LockComponentBuilder;
  import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 +import org.apache.hadoop.hive.metastore.Warehouse;
  import org.apache.hadoop.hive.metastore.api.DataOperationType;
+ import org.apache.hadoop.hive.metastore.api.LockComponent;
+ import org.apache.hadoop.hive.metastore.api.LockType;
  import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
  import org.apache.hadoop.hive.ql.ErrorMsg;
  import org.apache.hadoop.hive.ql.exec.Utilities;
+ import org.apache.hadoop.hive.ql.hooks.Entity;
+ import org.apache.hadoop.hive.ql.hooks.ReadEntity;
+ import org.apache.hadoop.hive.ql.hooks.WriteEntity;
  import org.apache.hadoop.hive.ql.io.AcidUtils.ParsedDelta;
  import org.apache.hadoop.hive.ql.io.orc.OrcFile;
  import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
  import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
  import org.apache.hadoop.hive.ql.io.orc.Reader;
  import org.apache.hadoop.hive.ql.io.orc.Writer;
 +import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 +import org.apache.hadoop.hive.ql.lockmgr.LockException;
+ import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
  import org.apache.hadoop.hive.ql.metadata.Table;
  import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
  import org.apache.hadoop.hive.ql.plan.TableScanDesc;
@@@ -2114,4 -1993,201 +2123,206 @@@ public class AcidUtils 
      tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "false");
      tblProps.remove(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
    }
+ 
+   private static boolean needsLock(Entity entity) {
+     switch (entity.getType()) {
+     case TABLE:
+       return isLockableTable(entity.getTable());
+     case PARTITION:
+       return isLockableTable(entity.getPartition().getTable());
+     default:
+       return true;
+     }
+   }
+ 
+   private static Table getTable(WriteEntity we) {
+     Table t = we.getTable();
+     if (t == null) {
+       throw new IllegalStateException("No table info for " + we);
+     }
+     return t;
+   }
+ 
+   private static boolean isLockableTable(Table t) {
+     if (t.isTemporary()) {
+       return false;
+     }
+     switch (t.getTableType()) {
+     case MANAGED_TABLE:
+     case MATERIALIZED_VIEW:
+       return true;
+     default:
+       return false;
+     }
+   }
+ 
+   /**
+    * Create lock components from write/read entities.
+    * @param outputs write entities
+    * @param inputs read entities
+    * @param conf
+    * @return list with lock components
+    */
+   public static List<LockComponent> makeLockComponents(Set<WriteEntity> outputs, Set<ReadEntity> inputs,
+       HiveConf conf) {
+     List<LockComponent> lockComponents = new ArrayList<>();
+     // For each source to read, get a shared lock
+     for (ReadEntity input : inputs) {
+       if (!input.needsLock() || input.isUpdateOrDelete() || !AcidUtils.needsLock(input)) {
+         // We don't want to acquire read locks during update or delete as we'll be acquiring write
+         // locks instead. Also, there's no need to lock temp tables since they're session wide
+         continue;
+       }
+       LockComponentBuilder compBuilder = new LockComponentBuilder();
+       compBuilder.setShared();
+       compBuilder.setOperationType(DataOperationType.SELECT);
+ 
+       Table t = null;
+       switch (input.getType()) {
+       case DATABASE:
+         compBuilder.setDbName(input.getDatabase().getName());
+         break;
+ 
+       case TABLE:
+         t = input.getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       case PARTITION:
+       case DUMMYPARTITION:
+         compBuilder.setPartitionName(input.getPartition().getName());
+         t = input.getPartition().getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       default:
+         // This is a file or something we don't hold locks for.
+         continue;
+       }
+       if (t != null) {
+         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+       }
+       LockComponent comp = compBuilder.build();
+       LOG.debug("Adding lock component to lock request " + comp.toString());
+       lockComponents.add(comp);
+     }
+     // For each source to write to, get the appropriate lock type.  If it's
+     // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
+     // overwrite) then we need a shared lock.  If it's update or delete then we
+     // need a SEMI-SHARED lock.
+     for (WriteEntity output : outputs) {
+       LOG.debug("output is null " + (output == null));
+       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR || !AcidUtils
+           .needsLock(output)) {
+         // We don't lock files or directories. We also skip locking temp tables.
+         continue;
+       }
+       LockComponentBuilder compBuilder = new LockComponentBuilder();
+       Table t = null;
+       switch (output.getType()) {
+       case DATABASE:
+         compBuilder.setDbName(output.getDatabase().getName());
+         break;
+ 
+       case TABLE:
+       case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
+         t = output.getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       case PARTITION:
+         compBuilder.setPartitionName(output.getPartition().getName());
+         t = output.getPartition().getTable();
+         compBuilder.setDbName(t.getDbName());
+         compBuilder.setTableName(t.getTableName());
+         break;
+ 
+       default:
+         // This is a file or something we don't hold locks for.
+         continue;
+       }
+       switch (output.getWriteType()) {
+         /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
+          Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
+          makes sense everywhere).  This however would be problematic for merge...*/
+       case DDL_EXCLUSIVE:
+         compBuilder.setExclusive();
+         compBuilder.setOperationType(DataOperationType.NO_TXN);
+         break;
+       case INSERT_OVERWRITE:
+         t = AcidUtils.getTable(output);
+         if (AcidUtils.isTransactionalTable(t)) {
+           if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
+             compBuilder.setExclusive();
+           } else {
+             compBuilder.setSemiShared();
+           }
+           compBuilder.setOperationType(DataOperationType.UPDATE);
+         } else {
+           compBuilder.setExclusive();
+           compBuilder.setOperationType(DataOperationType.NO_TXN);
+         }
+         break;
+       case INSERT:
+         assert t != null;
+         if (AcidUtils.isTransactionalTable(t)) {
+           compBuilder.setShared();
+         } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
+           final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
+               "Thought all the non native tables have an instance of storage handler");
+           LockType lockType = storageHandler.getLockType(output);
+           if (null == LockType.findByValue(lockType.getValue())) {
+             throw new IllegalArgumentException(String
+                 .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
+                     t.getTableName()));
+           }
+           compBuilder.setLock(lockType);
+         } else {
+           if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
+             compBuilder.setExclusive();
+           } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
+             compBuilder.setShared();
+           }
+         }
+         compBuilder.setOperationType(DataOperationType.INSERT);
+         break;
+       case DDL_SHARED:
+         compBuilder.setShared();
 -        compBuilder.setOperationType(DataOperationType.NO_TXN);
++        if (!output.isTxnAnalyze()) {
++          // Analyze needs txn components to be present, otherwise an aborted analyze write ID
++          // might be rolled under the watermark by compactor while stats written by it are
++          // still present.
++          compBuilder.setOperationType(DataOperationType.NO_TXN);
++        }
+         break;
+ 
+       case UPDATE:
+         compBuilder.setSemiShared();
+         compBuilder.setOperationType(DataOperationType.UPDATE);
+         break;
+       case DELETE:
+         compBuilder.setSemiShared();
+         compBuilder.setOperationType(DataOperationType.DELETE);
+         break;
+ 
+       case DDL_NO_LOCK:
+         continue; // No lock required here
+ 
+       default:
+         throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
+       }
+       if (t != null) {
+         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
+       }
+ 
+       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
+       LockComponent comp = compBuilder.build();
+       LOG.debug("Adding lock component to lock request " + comp.toString());
+       lockComponents.add(comp);
+     }
+     return lockComponents;
+   }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
----------------------------------------------------------------------
diff --cc ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index d3eefb9,06067a2..27abaf5
--- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@@ -24,12 -22,9 +24,10 @@@ import org.apache.hadoop.conf.Configura
  import org.apache.hadoop.hive.common.JavaUtils;
  import org.apache.hadoop.hive.common.ValidTxnList;
  import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
 +import org.apache.hadoop.hive.common.ValidWriteIdList;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.metastore.IMetaStoreClient;
- import org.apache.hadoop.hive.metastore.LockComponentBuilder;
  import org.apache.hadoop.hive.metastore.LockRequestBuilder;
- import org.apache.hadoop.hive.metastore.api.DataOperationType;
  import org.apache.hadoop.hive.metastore.api.LockComponent;
  import org.apache.hadoop.hive.metastore.api.LockResponse;
  import org.apache.hadoop.hive.metastore.api.LockState;
@@@ -432,182 -398,15 +402,16 @@@ public final class DbTxnManager extend
      rqstBuilder.setTransactionId(txnId)
          .setUser(username);
  
-     // For each source to read, get a shared lock
-     for (ReadEntity input : plan.getInputs()) {
-       if (!input.needsLock() || input.isUpdateOrDelete() || !needsLock(input)) {
-         // We don't want to acquire read locks during update or delete as we'll be acquiring write
-         // locks instead. Also, there's no need to lock temp tables since they're session wide
-         continue;
-       }
-       LockComponentBuilder compBuilder = new LockComponentBuilder();
-       compBuilder.setShared();
-       compBuilder.setOperationType(DataOperationType.SELECT);
- 
-       Table t = null;
-       switch (input.getType()) {
-         case DATABASE:
-           compBuilder.setDbName(input.getDatabase().getName());
-           break;
- 
-         case TABLE:
-           t = input.getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         case PARTITION:
-         case DUMMYPARTITION:
-           compBuilder.setPartitionName(input.getPartition().getName());
-           t = input.getPartition().getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         default:
-           // This is a file or something we don't hold locks for.
-           continue;
-       }
-       if(t != null) {
-         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-       }
-       LockComponent comp = compBuilder.build();
-       LOG.debug("Adding lock component to lock request " + comp.toString());
-       rqstBuilder.addLockComponent(comp);
-       atLeastOneLock = true;
-     }
- 
-     // For each source to write to, get the appropriate lock type.  If it's
-     // an OVERWRITE, we need to get an exclusive lock.  If it's an insert (no
-     // overwrite) than we need a shared.  If it's update or delete then we
-     // need a SEMI-SHARED.
-     for (WriteEntity output : plan.getOutputs()) {
-       LOG.debug("output is null " + (output == null));
-       if (output.getType() == Entity.Type.DFS_DIR || output.getType() == Entity.Type.LOCAL_DIR ||
-           !needsLock(output)) {
-         // We don't lock files or directories. We also skip locking temp tables.
-         continue;
-       }
-       LockComponentBuilder compBuilder = new LockComponentBuilder();
-       Table t = null;
-       switch (output.getType()) {
-         case DATABASE:
-           compBuilder.setDbName(output.getDatabase().getName());
-           break;
- 
-         case TABLE:
-         case DUMMYPARTITION:   // in case of dynamic partitioning lock the table
-           t = output.getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         case PARTITION:
-           compBuilder.setPartitionName(output.getPartition().getName());
-           t = output.getPartition().getTable();
-           compBuilder.setDbName(t.getDbName());
-           compBuilder.setTableName(t.getTableName());
-           break;
- 
-         default:
-           // This is a file or something we don't hold locks for.
-           continue;
-       }
-       switch (output.getWriteType()) {
-         /* base this on HiveOperation instead?  this and DDL_NO_LOCK is peppered all over the code...
-          Seems much cleaner if each stmt is identified as a particular HiveOperation (which I'd think
-          makes sense everywhere).  This however would be problematic for merge...*/
-       case DDL_EXCLUSIVE:
-         compBuilder.setExclusive();
-         compBuilder.setOperationType(DataOperationType.NO_TXN);
-         break;
-       case INSERT_OVERWRITE:
-         t = getTable(output);
-         if (AcidUtils.isTransactionalTable(t)) {
-           if (conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK)) {
-             compBuilder.setExclusive();
-           } else {
-             compBuilder.setSemiShared();
-           }
-           compBuilder.setOperationType(DataOperationType.UPDATE);
-         } else {
-           compBuilder.setExclusive();
-           compBuilder.setOperationType(DataOperationType.NO_TXN);
-         }
-         break;
-       case INSERT:
-         assert t != null;
-         if (AcidUtils.isTransactionalTable(t)) {
-           compBuilder.setShared();
-         } else if (MetaStoreUtils.isNonNativeTable(t.getTTable())) {
-           final HiveStorageHandler storageHandler = Preconditions.checkNotNull(t.getStorageHandler(),
-               "Thought all the non native tables have an instance of storage handler"
-           );
-           LockType lockType = storageHandler.getLockType(output);
-           switch (lockType) {
-           case EXCLUSIVE:
-             compBuilder.setExclusive();
-             break;
-           case SHARED_READ:
-             compBuilder.setShared();
-             break;
-           case SHARED_WRITE:
-             compBuilder.setSemiShared();
-             break;
-           default:
-             throw new IllegalArgumentException(String
-                 .format("Lock type [%s] for Database.Table [%s.%s] is unknown", lockType, t.getDbName(),
-                     t.getTableName()
-                 ));
-           }
 +
-         } else {
-           if (conf.getBoolVar(HiveConf.ConfVars.HIVE_TXN_STRICT_LOCKING_MODE)) {
-             compBuilder.setExclusive();
-           } else {  // this is backward compatible for non-ACID resources, w/o ACID semantics
-             compBuilder.setShared();
-           }
-         }
-         compBuilder.setOperationType(DataOperationType.INSERT);
-         break;
-       case DDL_SHARED:
-         compBuilder.setShared();
-         if (!output.isTxnAnalyze()) {
-           // Analyze needs txn components to be present, otherwise an aborted analyze write ID
-           // might be rolled under the watermark by compactor while stats written by it are
-           // still present.
-           compBuilder.setOperationType(DataOperationType.NO_TXN);
-         }
-         break;
- 
-       case UPDATE:
-         compBuilder.setSemiShared();
-         compBuilder.setOperationType(DataOperationType.UPDATE);
-         break;
-       case DELETE:
-         compBuilder.setSemiShared();
-         compBuilder.setOperationType(DataOperationType.DELETE);
-         break;
- 
-       case DDL_NO_LOCK:
-         continue; // No lock required here
- 
-       default:
-         throw new RuntimeException("Unknown write type " + output.getWriteType().toString());
-       }
-       if (t != null) {
-         compBuilder.setIsTransactional(AcidUtils.isTransactionalTable(t));
-       }
- 
-       compBuilder.setIsDynamicPartitionWrite(output.isDynamicPartitionWrite());
-       LockComponent comp = compBuilder.build();
-       LOG.debug("Adding lock component to lock request " + comp.toString());
-       rqstBuilder.addLockComponent(comp);
-       atLeastOneLock = true;
-     }
-     //plan
      // Make sure we need locks.  It's possible there's nothing to lock in
      // this operation.
-     if (!atLeastOneLock) {
+     if(plan.getInputs().isEmpty() && plan.getOutputs().isEmpty()) {
+       LOG.debug("No locks needed for queryId" + queryId);
+       return null;
+     }
+     List<LockComponent> lockComponents = AcidUtils.makeLockComponents(plan.getOutputs(), plan.getInputs(), conf);
+     //It's possible there's nothing to lock even if we have w/r entities.
+     if(lockComponents.isEmpty()) {
        LOG.debug("No locks needed for queryId" + queryId);
        return null;
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
index 0000000,e5c8ef7..aca5227
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-3.2.0-to-4.0.0.derby.sql
@@@ -1,0 -1,6 +1,8 @@@
+ -- Upgrade MetaStore schema from 3.2.0 to 4.0.0
 -
++-- HIVE-19416
++ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0;
++ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0;
+ 
+ -- This needs to be the last thing done.  Insert any changes above this line.
+ UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
index 0000000,a8aad87..1d8fc55
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-3.2.0-to-4.0.0.mssql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS MESSAGE;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
index 0000000,cb2e985..79e72ab
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-3.2.0-to-4.0.0.mysql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS ' ';
+ 
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS ' ';
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
index 0000000,89d37f4..aa20a49
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-3.2.0-to-4.0.0.oracle.sql
@@@ -1,0 -1,6 +1,9 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
+ 
++ALTER TABLE TBLS ADD WRITE_ID number NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0' AS Status from dual;
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/d2c60f3a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
index 0000000,08a1341..f7232a1
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-3.2.0-to-4.0.0.postgres.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.2.0 to 4.0.0';
+ 
++-- HIVE-19416
++ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
++ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.2.0 to 4.0.0';
+ 
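
The derby, mssql, mysql, oracle and postgres 3.2.0-to-4.0.0 scripts above all add the same WRITE_ID pair for HIVE-19416; only the column type and default differ per dialect. On Postgres, an equivalent sanity check (illustrative only, not part of the script) can use information_schema:

  SELECT table_name, column_name, is_nullable
    FROM information_schema.columns
   WHERE table_name IN ('TBLS', 'PARTITIONS')
     AND column_name = 'WRITE_ID';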


[25/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 0000000,29c98d1..3a65f77
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@@ -1,0 -1,104 +1,109 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ 
+ /**
+  * An interface wrapper for HMSHandler.  This interface contains methods that need to be
+  * called by internal classes but that are not part of the thrift interface.
+  */
+ @InterfaceAudience.Private
+ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
+ 
+   void init() throws MetaException;
+ 
+   /**
+    * Get the id of the thread of this handler.
+    * @return thread id
+    */
+   int getThreadId();
+ 
+   /**
+    * Get a reference to the underlying RawStore.
+    * @return the RawStore instance.
+    * @throws MetaException if the creation of a new RawStore object is necessary but fails.
+    */
+   RawStore getMS() throws MetaException;
+ 
+   /**
+    * Get a reference to the underlying TxnStore.
+    * @return the TxnStore instance.
+    */
+   TxnStore getTxnHandler();
+ 
+   /**
+    * Get a reference to Hive's warehouse object (the class that does all the physical operations).
+    * @return Warehouse instance.
+    */
+   Warehouse getWh();
+ 
+   /**
+    * Equivalent to get_database, but does not write to audit logs, or fire pre-event listeners.
+    * Meant to be used for internal hive classes that don't use the thrift interface.
+    * @param catName catalog name
+    * @param name database name
+    * @return database object
+    * @throws NoSuchObjectException If the database does not exist.
+    * @throws MetaException If another error occurs.
+    */
+   Database get_database_core(final String catName, final String name)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Equivalent of get_table, but does not log audits or fire the pre-event listener.
+    * Meant to be used for calls made by other hive classes, that are not using the
+    * thrift interface.
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @return Table object
+    * @throws NoSuchObjectException If the table does not exist.
+    * @throws MetaException  If another error occurs.
+    */
+   Table get_table_core(final String catName, final String dbname, final String name)
+       throws MetaException, NoSuchObjectException;
+ 
++  Table get_table_core(final String catName, final String dbname,
++                       final String name, final long txnId,
++                       final String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
+   /**
+    * Get a list of all transactional listeners.
+    * @return list of listeners.
+    */
+   List<TransactionalMetaStoreEventListener> getTransactionalListeners();
+ 
+   /**
+    * Get a list of all non-transactional listeners.
+    * @return list of non-transactional listeners.
+    */
+   List<MetaStoreEventListener> getListeners();
+ }


[06/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
index 0000000,abbcda3..c5977b2
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@@ -1,0 -1,211 +1,218 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
++
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ import static org.junit.Assert.assertEquals;
+ 
+ 
+ /**
+  * A wrapper around {@link ObjectStore} that allows us to inject custom behaviour
+  * on to some of the methods for testing.
+  */
+ public class InjectableBehaviourObjectStore extends ObjectStore {
+   public InjectableBehaviourObjectStore() {
+     super();
+   }
+ 
+   /**
+    * A utility class that allows those injecting behaviour to determine whether their injections occurred.
+    */
+   public static abstract class BehaviourInjection<T, F>
+       implements com.google.common.base.Function<T, F>{
+     protected boolean injectionPathCalled = false;
+     protected boolean nonInjectedPathCalled = false;
+ 
+     public void assertInjectionsPerformed(
+         boolean expectedInjectionCalled, boolean expectedNonInjectedPathCalled){
+       assertEquals(expectedInjectionCalled, injectionPathCalled);
+       assertEquals(expectedNonInjectedPathCalled, nonInjectedPathCalled);
+     }
+   }
+ 
+   /**
+    * A utility class to pass the arguments of the caller to the stub method.
+    */
+   public class CallerArguments {
+     public String dbName;
+     public String tblName;
+     public String funcName;
+     public String constraintTblName;
+ 
+     public CallerArguments(String dbName) {
+       this.dbName = dbName;
+     }
+   }
+ 
+   private static com.google.common.base.Function<Table, Table> getTableModifier =
+       com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<Partition, Partition> getPartitionModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<List<String>, List<String>> listPartitionNamesModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<NotificationEventResponse, NotificationEventResponse>
+           getNextNotificationModifier = com.google.common.base.Functions.identity();
+ 
+   private static com.google.common.base.Function<CallerArguments, Boolean> callerVerifier = null;
+ 
+   // Methods to set/reset getTable modifier
+   public static void setGetTableBehaviour(com.google.common.base.Function<Table, Table> modifier){
+     getTableModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetTableBehaviour(){
+     setGetTableBehaviour(null);
+   }
+ 
+   // Methods to set/reset getPartition modifier
+   public static void setGetPartitionBehaviour(com.google.common.base.Function<Partition, Partition> modifier){
+     getPartitionModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetPartitionBehaviour(){
+     setGetPartitionBehaviour(null);
+   }
+ 
+   // Methods to set/reset listPartitionNames modifier
+   public static void setListPartitionNamesBehaviour(com.google.common.base.Function<List<String>, List<String>> modifier){
+     listPartitionNamesModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetListPartitionNamesBehaviour(){
+     setListPartitionNamesBehaviour(null);
+   }
+ 
+   // Methods to set/reset getNextNotification modifier
+   public static void setGetNextNotificationBehaviour(
+           com.google.common.base.Function<NotificationEventResponse,NotificationEventResponse> modifier){
+     getNextNotificationModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetNextNotificationBehaviour(){
+     setGetNextNotificationBehaviour(null);
+   }
+ 
+   // Methods to set/reset caller checker
+   public static void setCallerVerifier(com.google.common.base.Function<CallerArguments, Boolean> verifier){
+     callerVerifier = verifier;
+   }
+ 
+   public static void resetCallerVerifier(){
+     setCallerVerifier(null);
+   }
+ 
+   // ObjectStore methods to be overridden with injected behavior
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return getTableModifier.apply(super.getTable(catName, dbName, tableName));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++      long txnId, String writeIdList) throws MetaException {
++    return getTableModifier.apply(super.getTable(catName, dbName, tableName, txnId, writeIdList));
++  }
++
++  @Override
+   public Partition getPartition(String catName, String dbName, String tableName,
+                                 List<String> partVals) throws NoSuchObjectException, MetaException {
+     return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName, short max)
+           throws MetaException {
+     return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max));
+   }
+ 
+   @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return getNextNotificationModifier.apply(super.getNextNotification(rqst));
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(tbl.getDbName());
+       args.tblName = tbl.getTableName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Table operation on DB: "
+                 + args.dbName + " table: " + args.tblName);
+       }
+     }
+     super.createTable(tbl);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(func.getDbName());
+       args.funcName = func.getFunctionName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Function operation on DB: "
+                 + args.dbName + " function: " + args.funcName);
+       }
+     }
+     super.createFunction(func);
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(pks.get(0).getTable_db());
+       args.constraintTblName = pks.get(0).getTable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Primary Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addPrimaryKeys(pks);
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(fks.get(0).getFktable_db());
+       args.constraintTblName = fks.get(0).getFktable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Foreign Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addForeignKeys(fks);
+   }
+ }

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index 0000000,adc82b0..533cabb
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@@ -1,0 -1,121 +1,121 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ 
+ import java.util.Arrays;
+ 
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestHiveAlterHandler {
+ 
+   private Configuration conf = MetastoreConf.newMetastoreConf();
+ 
+   @Test
+   public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+   }
+ 
+   @Test
+   public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+     Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
+     );
+   }
+ 
+   @Test
+   public void testAlterTableChangePosNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col4, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
+   }
+ 
+ }
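
The add-column and change-position tests above assert a negative by stubbing the mocked RawStore to throw if the stats lookup happens. A roughly equivalent sketch using an explicit Mockito.never() verification (reusing the oldTable/newTable fixtures from testAlterTableAddColNotUpdateStats; illustrative only, not part of this commit):

    RawStore msdb = Mockito.mock(RawStore.class);
    HiveAlterHandler handler = new HiveAlterHandler();
    handler.setConf(conf);
    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null, -1, null);
    // Verify the stats lookup never happened for an add-column-only alter.
    Mockito.verify(msdb, Mockito.never()).getTableColumnStatistics(
        getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(),
        Arrays.asList("col1", "col2", "col3"));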


[23/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index 0000000,f45b71f..07be1ba
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@@ -1,0 -1,2817 +1,2837 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.commons.lang.StringUtils.normalizeSpace;
+ import static org.apache.commons.lang.StringUtils.repeat;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ import java.sql.Blob;
+ import java.sql.Clob;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.text.ParseException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.TreeMap;
+ import java.util.stream.Collectors;
+ 
+ import javax.jdo.PersistenceManager;
+ import javax.jdo.Query;
+ import javax.jdo.Transaction;
+ import javax.jdo.datastore.JDOConnection;
+ 
+ import org.apache.commons.lang.BooleanUtils;
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.AggregateStatsCache.AggrColStats;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Order;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SkewedInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.model.MConstraint;
+ import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
+ import org.apache.hadoop.hive.metastore.model.MDatabase;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
+ import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
+ import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
+ import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
+ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LeafNode;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.LogicalOperator;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.hive.common.util.BloomFilter;
+ import org.datanucleus.store.rdbms.query.ForwardQueryResult;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * This class contains the optimizations for MetaStore that rely on direct SQL access to
+  * the underlying database. It should use ANSI SQL and be compatible with common databases
+  * such as MySQL (note that MySQL doesn't use full ANSI mode by default), Postgres, etc.
+  *
+  * As of now, only the partition retrieval is done this way to improve job startup time;
+  * JDOQL partition retrieval is still present so as not to limit the ORM solution we have
+  * to SQL stores only. There's always a way to do without direct SQL.
+  */
+ class MetaStoreDirectSql {
+   private static final int NO_BATCHING = -1, DETECT_BATCHING = 0;
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(MetaStoreDirectSql.class);
+   private final PersistenceManager pm;
++  private final Configuration conf;
+   private final String schema;
+ 
+   /**
+    * We want to avoid db-specific code in this class and stick with ANSI SQL. However:
+    * 1) mysql and postgres are differently ansi-incompatible (mysql by default doesn't support
+    * quoted identifiers, and postgres contravenes ANSI by coercing unquoted ones to lower case).
+    * MySQL's way of working around this is simpler (just set ansi quotes mode on), so we will
+    * use that. MySQL detection is done by actually issuing the set-ansi-quotes command;
+    *
+    * Use sparingly, we don't want to devolve into another DataNucleus...
+    */
+   private final DatabaseProduct dbType;
+   private final int batchSize;
+   private final boolean convertMapNullsToEmptyStrings;
+   private final String defaultPartName;
+ 
+   /**
+    * Whether direct SQL can be used with the current datastore backing {@link #pm}.
+    */
+   private final boolean isCompatibleDatastore;
+   private final boolean isAggregateStatsCacheEnabled;
+   private AggregateStatsCache aggrStatsCache;
+ 
+   @java.lang.annotation.Target(java.lang.annotation.ElementType.FIELD)
+   @java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME)
+   private @interface TableName {}
+ 
+   // Table names with schema name, if necessary
+   @TableName
+   private String DBS, TBLS, PARTITIONS, DATABASE_PARAMS, PARTITION_PARAMS, SORT_COLS, SD_PARAMS,
+       SDS, SERDES, SKEWED_STRING_LIST_VALUES, SKEWED_VALUES, BUCKETING_COLS, SKEWED_COL_NAMES,
+       SKEWED_COL_VALUE_LOC_MAP, COLUMNS_V2, PARTITION_KEYS, SERDE_PARAMS, PART_COL_STATS, KEY_CONSTRAINTS,
+       TAB_COL_STATS, PARTITION_KEY_VALS, PART_PRIVS, PART_COL_PRIVS, SKEWED_STRING_LIST, CDS;
+ 
++
+   public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String schema) {
+     this.pm = pm;
++    this.conf = conf;
+     this.schema = schema;
+     DatabaseProduct dbType = null;
+     try {
+       dbType = DatabaseProduct.determineDatabaseProduct(getProductName(pm));
+     } catch (SQLException e) {
+       LOG.warn("Cannot determine database product; assuming OTHER", e);
+       dbType = DatabaseProduct.OTHER;
+     }
+     this.dbType = dbType;
+     int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_PARTITION_BATCH_SIZE);
+     if (batchSize == DETECT_BATCHING) {
+       batchSize = DatabaseProduct.needsInBatching(dbType) ? 1000 : NO_BATCHING;
+     }
+     this.batchSize = batchSize;
+ 
+     for (java.lang.reflect.Field f : this.getClass().getDeclaredFields()) {
+       if (f.getAnnotation(TableName.class) == null) continue;
+       try {
+         f.set(this, getFullyQualifiedName(schema, f.getName()));
+       } catch (IllegalArgumentException | IllegalAccessException e) {
+         throw new RuntimeException("Internal error, cannot set " + f.getName());
+       }
+     }
+ 
+     convertMapNullsToEmptyStrings =
+         MetastoreConf.getBoolVar(conf, ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS);
+     defaultPartName = MetastoreConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
+ 
+     String jdoIdFactory = MetastoreConf.getVar(conf, ConfVars.IDENTIFIER_FACTORY);
+     if (! ("datanucleus1".equalsIgnoreCase(jdoIdFactory))){
+       LOG.warn("Underlying metastore does not use 'datanucleus1' for its ORM naming scheme."
+           + " Disabling directSQL as it uses hand-hardcoded SQL with that assumption.");
+       isCompatibleDatastore = false;
+     } else {
+       boolean isInTest = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST);
+       isCompatibleDatastore = (!isInTest || ensureDbInit()) && runTestQuery();
+       if (isCompatibleDatastore) {
+         LOG.debug("Using direct SQL, underlying DB is " + dbType);
+       }
+     }
+ 
+     isAggregateStatsCacheEnabled = MetastoreConf.getBoolVar(
+         conf, ConfVars.AGGREGATE_STATS_CACHE_ENABLED);
+     if (isAggregateStatsCacheEnabled) {
+       aggrStatsCache = AggregateStatsCache.getInstance(conf);
+     }
+   }
+ 
+   private static String getFullyQualifiedName(String schema, String tblName) {
+     return ((schema == null || schema.isEmpty()) ? "" : "\"" + schema + "\".\"")
+         + "\"" + tblName + "\"";
+   }
+ 
+ 
+   public MetaStoreDirectSql(PersistenceManager pm, Configuration conf) {
+     this(pm, conf, "");
+   }
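
A construction sketch for context (illustrative; MetaStoreDirectSql is package-private, so in practice ObjectStore is the caller, and pm here stands for its configured JDO PersistenceManager):

    MetaStoreDirectSql directSql = new MetaStoreDirectSql(pm, conf);
    // Fall back to the JDO/DataNucleus paths when the hand-written SQL cannot be used.
    boolean useDirectSql = directSql.isCompatibleDatastore();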
+ 
+   static String getProductName(PersistenceManager pm) {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     try {
+       return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
+     } catch (Throwable t) {
+       LOG.warn("Error retrieving product name", t);
+       return null;
+     } finally {
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   private boolean ensureDbInit() {
+     Transaction tx = pm.currentTransaction();
+     boolean doCommit = false;
+     if (!tx.isActive()) {
+       tx.begin();
+       doCommit = true;
+     }
+     LinkedList<Query> initQueries = new LinkedList<>();
+ 
+     try {
+       // Force the underlying db to initialize.
+       initQueries.add(pm.newQuery(MDatabase.class, "name == ''"));
+       initQueries.add(pm.newQuery(MTableColumnStatistics.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MPartitionColumnStatistics.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MConstraint.class, "childIntegerIndex < 0"));
+       initQueries.add(pm.newQuery(MNotificationLog.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MNotificationNextId.class, "nextEventId < -1"));
+       initQueries.add(pm.newQuery(MWMResourcePlan.class, "name == ''"));
+       initQueries.add(pm.newQuery(MCreationMetadata.class, "dbName == ''"));
+       initQueries.add(pm.newQuery(MPartitionPrivilege.class, "principalName == ''"));
+       initQueries.add(pm.newQuery(MPartitionColumnPrivilege.class, "principalName == ''"));
+       Query q;
+       while ((q = initQueries.peekFirst()) != null) {
+         q.execute();
+         initQueries.pollFirst();
+       }
+ 
+       return true;
+     } catch (Exception ex) {
+       doCommit = false;
+       LOG.warn("Database initialization failed; direct SQL is disabled", ex);
+       tx.rollback();
+       return false;
+     } finally {
+       if (doCommit) {
+         tx.commit();
+       }
+       for (Query q : initQueries) {
+         try {
+           q.closeAll();
+         } catch (Throwable t) {
+         }
+       }
+     }
+   }
+ 
+   private boolean runTestQuery() {
+     Transaction tx = pm.currentTransaction();
+     boolean doCommit = false;
+     if (!tx.isActive()) {
+       tx.begin();
+       doCommit = true;
+     }
+     Query query = null;
+     // Run a self-test query. If it doesn't work, we will self-disable. What a PITA...
+     String selfTestQuery = "select \"DB_ID\" from " + DBS + "";
+     try {
+       prepareTxn();
+       query = pm.newQuery("javax.jdo.query.SQL", selfTestQuery);
+       query.execute();
+       return true;
+     } catch (Throwable t) {
+       doCommit = false;
+       LOG.warn("Self-test query [" + selfTestQuery + "] failed; direct SQL is disabled", t);
+       tx.rollback();
+       return false;
+     } finally {
+       if (doCommit) {
+         tx.commit();
+       }
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+   }
+ 
+   public String getSchema() {
+     return schema;
+   }
+ 
+   public boolean isCompatibleDatastore() {
+     return isCompatibleDatastore;
+   }
+ 
+   private void executeNoResult(final String queryText) throws SQLException {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     Statement statement = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     try {
+       long start = doTrace ? System.nanoTime() : 0;
+       statement = ((Connection)jdoConn.getNativeConnection()).createStatement();
+       statement.execute(queryText);
+       timingTrace(doTrace, queryText, start, doTrace ? System.nanoTime() : 0);
+     } finally {
+       if(statement != null){
+           statement.close();
+       }
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   public Database getDatabase(String catName, String dbName) throws MetaException{
+     Query queryDbSelector = null;
+     Query queryDbParams = null;
+     try {
+       dbName = dbName.toLowerCase();
+       catName = catName.toLowerCase();
+ 
+       String queryTextDbSelector= "select "
+           + "\"DB_ID\", \"NAME\", \"DB_LOCATION_URI\", \"DESC\", "
+           + "\"OWNER_NAME\", \"OWNER_TYPE\", \"CTLG_NAME\" "
+           + "FROM "+ DBS
+           + " where \"NAME\" = ? and \"CTLG_NAME\" = ? ";
+       Object[] params = new Object[] { dbName, catName };
+       queryDbSelector = pm.newQuery("javax.jdo.query.SQL", queryTextDbSelector);
+ 
+       if (LOG.isTraceEnabled()) {
+         LOG.trace("getDatabase:query instantiated : " + queryTextDbSelector
+             + " with param [" + params[0] + "]");
+       }
+ 
+       List<Object[]> sqlResult = executeWithArray(
+           queryDbSelector, params, queryTextDbSelector);
+       if ((sqlResult == null) || sqlResult.isEmpty()) {
+         return null;
+       }
+ 
+       assert(sqlResult.size() == 1);
+       if (sqlResult.get(0) == null) {
+         return null;
+       }
+ 
+       Object[] dbline = sqlResult.get(0);
+       Long dbid = extractSqlLong(dbline[0]);
+ 
+       String queryTextDbParams = "select \"PARAM_KEY\", \"PARAM_VALUE\" "
+           + " from " + DATABASE_PARAMS + " "
+           + " WHERE \"DB_ID\" = ? "
+           + " AND \"PARAM_KEY\" IS NOT NULL";
+       params[0] = dbid;
+       queryDbParams = pm.newQuery("javax.jdo.query.SQL", queryTextDbParams);
+       if (LOG.isTraceEnabled()) {
+         LOG.trace("getDatabase:query2 instantiated : " + queryTextDbParams
+             + " with param [" + params[0] + "]");
+       }
+ 
+       Map<String,String> dbParams = new HashMap<String,String>();
+       List<Object[]> sqlResult2 = ensureList(executeWithArray(
+           queryDbParams, params, queryTextDbParams));
+       if (!sqlResult2.isEmpty()) {
+         for (Object[] line : sqlResult2) {
+           dbParams.put(extractSqlString(line[0]), extractSqlString(line[1]));
+         }
+       }
+       Database db = new Database();
+       db.setName(extractSqlString(dbline[1]));
+       db.setLocationUri(extractSqlString(dbline[2]));
+       db.setDescription(extractSqlString(dbline[3]));
+       db.setOwnerName(extractSqlString(dbline[4]));
+       String type = extractSqlString(dbline[5]);
+       db.setOwnerType(
+           (null == type || type.trim().isEmpty()) ? null : PrincipalType.valueOf(type));
+       db.setCatalogName(extractSqlString(dbline[6]));
+       db.setParameters(MetaStoreUtils.trimMapNulls(dbParams,convertMapNullsToEmptyStrings));
+       if (LOG.isDebugEnabled()){
+         LOG.debug("getDatabase: directsql returning db " + db.getName()
+             + " locn["+db.getLocationUri()  +"] desc [" +db.getDescription()
+             + "] owner [" + db.getOwnerName() + "] ownertype ["+ db.getOwnerType() +"]");
+       }
+       return db;
+     } finally {
+       if (queryDbSelector != null){
+         queryDbSelector.closeAll();
+       }
+       if (queryDbParams != null){
+         queryDbParams.closeAll();
+       }
+     }
+   }
+ 
+   /**
+    * Get table names by using direct SQL queries.
+    * @param catName catalog name
+    * @param dbName Metastore database name
+    * @param tableType Table type, or null if we want to get all tables
+    * @return list of table names
+    */
+   public List<String> getTables(String catName, String dbName, TableType tableType)
+       throws MetaException {
+     String queryText = "SELECT " + TBLS + ".\"TBL_NAME\""
+       + " FROM " + TBLS + " "
+       + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + " WHERE " + DBS + ".\"NAME\" = ? AND " + DBS + ".\"CTLG_NAME\" = ? "
+       + (tableType == null ? "" : "AND " + TBLS + ".\"TBL_TYPE\" = ? ") ;
+ 
+     List<String> pms = new ArrayList<>();
+     pms.add(dbName);
+     pms.add(catName);
+     if (tableType != null) {
+       pms.add(tableType.toString());
+     }
+ 
+     Query<?> queryParams = pm.newQuery("javax.jdo.query.SQL", queryText);
+     return executeWithArray(
+         queryParams, pms.toArray(), queryText);
+   }
+ 
+   /**
+    * Get table names by using direct SQL queries.
+    *
+    * @param dbName Metastore database name
+    * @return list of table names
+    */
+   public List<String> getMaterializedViewsForRewriting(String dbName) throws MetaException {
+     String queryText = "SELECT " + TBLS + ".\"TBL_NAME\""
+       + " FROM " + TBLS + " "
+       + " INNER JOIN " + DBS + " ON " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + " WHERE " + DBS + ".\"NAME\" = ? AND " + TBLS + ".\"TBL_TYPE\" = ? " ;
+ 
+     List<String> pms = new ArrayList<String>();
+     pms.add(dbName);
+     pms.add(TableType.MATERIALIZED_VIEW.toString());
+ 
+     Query<?> queryParams = pm.newQuery("javax.jdo.query.SQL", queryText);
+     return executeWithArray(
+         queryParams, pms.toArray(), queryText);
+   }
+ 
+   /**
+    * Gets partitions by using direct SQL queries.
+    * @param catName Metastore catalog name.
+    * @param dbName Metastore db name.
+    * @param tblName Metastore table name.
+    * @param partNames Partition names to get.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitionsViaSqlFilter(final String catName, final String dbName,
+                                                    final String tblName, List<String> partNames)
+       throws MetaException {
+     if (partNames.isEmpty()) {
+       return Collections.emptyList();
+     }
+     return Batchable.runBatched(batchSize, partNames, new Batchable<String, Partition>() {
+       @Override
+       public List<Partition> run(List<String> input) throws MetaException {
+         String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")";
+         List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName, dbName, tblName,
+             filter, input, Collections.<String>emptyList(), null);
+         if (partitionIds.isEmpty()) {
+           return Collections.emptyList(); // no partitions, bail early.
+         }
+         return getPartitionsFromPartitionIds(catName, dbName, tblName, null, partitionIds);
+       }
+     });
+   }
+ 
+   /**
+    * Gets partitions by using direct SQL queries.
+    * @param filter The filter.
+    * @param max The maximum number of partitions to return.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitionsViaSqlFilter(
+       SqlFilterForPushdown filter, Integer max) throws MetaException {
+     Boolean isViewTable = isViewTable(filter.table);
+     String catName = filter.table.isSetCatName() ? filter.table.getCatName() :
+         DEFAULT_CATALOG_NAME;
+     List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName,
+         filter.table.getDbName(), filter.table.getTableName(), filter.filter, filter.params,
+         filter.joins, max);
+     if (partitionIds.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+     return Batchable.runBatched(batchSize, partitionIds, new Batchable<Object, Partition>() {
+       @Override
+       public List<Partition> run(List<Object> input) throws MetaException {
+         return getPartitionsFromPartitionIds(catName, filter.table.getDbName(),
+             filter.table.getTableName(), isViewTable, input);
+       }
+     });
+   }
+ 
+   public static class SqlFilterForPushdown {
+     private final List<Object> params = new ArrayList<>();
+     private final List<String> joins = new ArrayList<>();
+     private String filter;
+     private Table table;
+   }
+ 
+   public boolean generateSqlFilterForPushdown(
+       Table table, ExpressionTree tree, SqlFilterForPushdown result) throws MetaException {
+     // Derby and Oracle do not interpret filters ANSI-properly in some cases and need a workaround.
+     boolean dbHasJoinCastBug = DatabaseProduct.hasJoinOperationOrderBug(dbType);
+     result.table = table;
+     result.filter = PartitionFilterGenerator.generateSqlFilter(table, tree, result.params,
+         result.joins, dbHasJoinCastBug, defaultPartName, dbType, schema);
+     return result.filter != null;
+   }
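
A usage sketch for the pushdown pair above (illustrative; directSql, table and exprTree stand for objects the caller already has):

    SqlFilterForPushdown filter = new SqlFilterForPushdown();
    // generateSqlFilterForPushdown returns false when the expression tree cannot be
    // rendered as SQL, in which case the caller would fall back to the JDO path.
    if (directSql.generateSqlFilterForPushdown(table, exprTree, filter)) {
      List<Partition> parts = directSql.getPartitionsViaSqlFilter(filter, null);
    }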
+ 
+   /**
+    * Gets all partitions of a table by using direct SQL queries.
+    * @param catName Metastore catalog name.
+    * @param dbName Metastore db name.
+    * @param tblName Metastore table name.
+    * @param max The maximum number of partitions to return.
+    * @return List of partitions.
+    */
+   public List<Partition> getPartitions(String catName,
+       String dbName, String tblName, Integer max) throws MetaException {
+     List<Object> partitionIds = getPartitionIdsViaSqlFilter(catName, dbName,
+         tblName, null, Collections.<String>emptyList(), Collections.<String>emptyList(), max);
+     if (partitionIds.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+ 
+     // Get full objects. For Oracle/etc. do it in batches.
+     List<Partition> result = Batchable.runBatched(batchSize, partitionIds, new Batchable<Object, Partition>() {
+       @Override
+       public List<Partition> run(List<Object> input) throws MetaException {
+         return getPartitionsFromPartitionIds(catName, dbName, tblName, null, input);
+       }
+     });
+     return result;
+   }
+ 
+   private static Boolean isViewTable(Table t) {
+     return t.isSetTableType() ?
+         t.getTableType().equals(TableType.VIRTUAL_VIEW.toString()) : null;
+   }
+ 
+   private boolean isViewTable(String catName, String dbName, String tblName) throws MetaException {
+     Query query = null;
+     try {
+       String queryText = "select \"TBL_TYPE\" from " + TBLS + "" +
+           " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " +
+           " where " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " + DBS + ".\"CTLG_NAME\" = ?";
+       Object[] params = new Object[] { tblName, dbName, catName };
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       query.setUnique(true);
+       Object result = executeWithArray(query, params, queryText);
+       return (result != null) && result.toString().equals(TableType.VIRTUAL_VIEW.toString());
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+   }
+ 
+   /**
+    * Get partition ids for the query using direct SQL, to avoid the bazillion
+    * queries DN would otherwise issue while retrieving each object individually.
+    * @param catName MetaStore catalog name
+    * @param dbName MetaStore db name
+    * @param tblName MetaStore table name
+    * @param sqlFilter SQL filter to use. Better be SQL92-compliant.
+    * @param paramsForFilter params for ?-s in SQL filter text. Params must be in order.
+    * @param joinsForFilter if the filter needs additional join statement, they must be in
+    *                       this list. Better be SQL92-compliant.
+    * @param max The maximum number of partitions to return.
+    * @return List of partition ids.
+    */
+   private List<Object> getPartitionIdsViaSqlFilter(
+       String catName, String dbName, String tblName, String sqlFilter,
+       List<? extends Object> paramsForFilter, List<String> joinsForFilter, Integer max)
+       throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     final String dbNameLcase = dbName.toLowerCase();
+     final String tblNameLcase = tblName.toLowerCase();
+     final String catNameLcase = normalizeSpace(catName).toLowerCase();
+ 
+     // We have to be mindful of order during filtering if we are not returning all partitions.
+     String orderForFilter = (max != null) ? " order by \"PART_NAME\" asc" : "";
+ 
+     String queryText =
+         "select " + PARTITIONS + ".\"PART_ID\" from " + PARTITIONS + ""
+       + "  inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" "
+       + "    and " + TBLS + ".\"TBL_NAME\" = ? "
+       + "  inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + "     and " + DBS + ".\"NAME\" = ? "
+       + join(joinsForFilter, ' ')
+       + " where " + DBS + ".\"CTLG_NAME\" = ? "
+       + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter;
+     Object[] params = new Object[paramsForFilter.size() + 3];
+     params[0] = tblNameLcase;
+     params[1] = dbNameLcase;
+     params[2] = catNameLcase;
+     for (int i = 0; i < paramsForFilter.size(); ++i) {
+       params[i + 3] = paramsForFilter.get(i);
+     }
+ 
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     if (max != null) {
+       query.setRange(0, max.shortValue());
+     }
+     List<Object> sqlResult = executeWithArray(query, params, queryText);
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     timingTrace(doTrace, queryText, start, queryTime);
+     if (sqlResult.isEmpty()) {
+       return Collections.emptyList(); // no partitions, bail early.
+     }
+ 
+     List<Object> result = new ArrayList<Object>(sqlResult.size());
+     for (Object fields : sqlResult) {
+       result.add(extractSqlLong(fields));
+     }
+     query.closeAll();
+     return result;
+   }
+ 
+   /** Should be called with the list short enough to not trip up Oracle/etc. */
+   private List<Partition> getPartitionsFromPartitionIds(String catName, String dbName, String tblName,
+       Boolean isView, List<Object> partIdList) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+ 
+     int idStringWidth = (int)Math.ceil(Math.log10(partIdList.size())) + 1; // 1 for comma
+     int sbCapacity = partIdList.size() * idStringWidth;
+ 
+     String partIds = getIdListForIn(partIdList);
+ 
+     // Get most of the fields for the IDs provided.
+     // Assume db and table names are the same for all partitions, as provided in the arguments.
+     String queryText =
+       "select " + PARTITIONS + ".\"PART_ID\", " + SDS + ".\"SD_ID\", " + SDS + ".\"CD_ID\","
+     + " " + SERDES + ".\"SERDE_ID\", " + PARTITIONS + ".\"CREATE_TIME\","
+     + " " + PARTITIONS + ".\"LAST_ACCESS_TIME\", " + SDS + ".\"INPUT_FORMAT\", " + SDS + ".\"IS_COMPRESSED\","
+     + " " + SDS + ".\"IS_STOREDASSUBDIRECTORIES\", " + SDS + ".\"LOCATION\", " + SDS + ".\"NUM_BUCKETS\","
 -    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\" "
 -    + "from " + PARTITIONS + ""
++    + " " + SDS + ".\"OUTPUT_FORMAT\", " + SERDES + ".\"NAME\", " + SERDES + ".\"SLIB\", " + PARTITIONS
++    + ".\"WRITE_ID\"" + " from " + PARTITIONS + ""
+     + "  left outer join " + SDS + " on " + PARTITIONS + ".\"SD_ID\" = " + SDS + ".\"SD_ID\" "
+     + "  left outer join " + SERDES + " on " + SDS + ".\"SERDE_ID\" = " + SERDES + ".\"SERDE_ID\" "
+     + "where \"PART_ID\" in (" + partIds + ") order by \"PART_NAME\" asc";
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     List<Object[]> sqlResult = executeWithArray(query, null, queryText);
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     Deadline.checkTimeout();
+ 
+     // Read all the fields and create partitions, SDs and serdes.
+     TreeMap<Long, Partition> partitions = new TreeMap<Long, Partition>();
+     TreeMap<Long, StorageDescriptor> sds = new TreeMap<Long, StorageDescriptor>();
+     TreeMap<Long, SerDeInfo> serdes = new TreeMap<Long, SerDeInfo>();
+     TreeMap<Long, List<FieldSchema>> colss = new TreeMap<Long, List<FieldSchema>>();
+     // Keep order by name, consistent with JDO.
+     ArrayList<Partition> orderedResult = new ArrayList<Partition>(partIdList.size());
+ 
+     // Prepare StringBuilder-s for "in (...)" lists to use in one-to-many queries.
+     StringBuilder sdSb = new StringBuilder(sbCapacity), serdeSb = new StringBuilder(sbCapacity);
+     StringBuilder colsSb = new StringBuilder(7); // We expect that there's only one field schema.
+     tblName = tblName.toLowerCase();
+     dbName = dbName.toLowerCase();
+     catName = normalizeSpace(catName).toLowerCase();
+     for (Object[] fields : sqlResult) {
+       // Here comes the ugly part...
+       long partitionId = extractSqlLong(fields[0]);
+       Long sdId = extractSqlLong(fields[1]);
+       Long colId = extractSqlLong(fields[2]);
+       Long serdeId = extractSqlLong(fields[3]);
+       // A partition must have at least sdId and serdeId set, or nothing set if it's a view.
+       if (sdId == null || serdeId == null) {
+         if (isView == null) {
+           isView = isViewTable(catName, dbName, tblName);
+         }
+         if ((sdId != null || colId != null || serdeId != null) || !isView) {
+           throw new MetaException("Unexpected null for one of the IDs, SD " + sdId +
+                   ", serde " + serdeId + " for a " + (isView ? "" : "non-") + " view");
+         }
+       }
+ 
+       Partition part = new Partition();
+       orderedResult.add(part);
+       // Set the collection fields; some code might not check presence before accessing them.
+       part.setParameters(new HashMap<>());
+       part.setValues(new ArrayList<String>());
+       part.setCatName(catName);
+       part.setDbName(dbName);
+       part.setTableName(tblName);
+       if (fields[4] != null) part.setCreateTime(extractSqlInt(fields[4]));
+       if (fields[5] != null) part.setLastAccessTime(extractSqlInt(fields[5]));
++      Long writeId = extractSqlLong(fields[14]);
++      if (writeId != null) {
++        part.setWriteId(writeId);
++      }
+       partitions.put(partitionId, part);
+ 
++
+       if (sdId == null) continue; // Probably a view.
+       assert serdeId != null;
+ 
+       // We assume each partition has a unique SD.
+       StorageDescriptor sd = new StorageDescriptor();
+       StorageDescriptor oldSd = sds.put(sdId, sd);
+       if (oldSd != null) {
+         throw new MetaException("Partitions reuse SDs; we don't expect that");
+       }
+       // Set the collection fields; some code might not check presence before accessing them.
+       sd.setSortCols(new ArrayList<Order>());
+       sd.setBucketCols(new ArrayList<String>());
+       sd.setParameters(new HashMap<String, String>());
+       sd.setSkewedInfo(new SkewedInfo(new ArrayList<String>(),
+           new ArrayList<List<String>>(), new HashMap<List<String>, String>()));
+       sd.setInputFormat((String)fields[6]);
+       Boolean tmpBoolean = extractSqlBoolean(fields[7]);
+       if (tmpBoolean != null) sd.setCompressed(tmpBoolean);
+       tmpBoolean = extractSqlBoolean(fields[8]);
+       if (tmpBoolean != null) sd.setStoredAsSubDirectories(tmpBoolean);
+       sd.setLocation((String)fields[9]);
+       if (fields[10] != null) sd.setNumBuckets(extractSqlInt(fields[10]));
+       sd.setOutputFormat((String)fields[11]);
+       sdSb.append(sdId).append(",");
+       part.setSd(sd);
+ 
+       if (colId != null) {
+         List<FieldSchema> cols = colss.get(colId);
+         // We expect that colId will be the same for all (or many) SDs.
+         if (cols == null) {
+           cols = new ArrayList<FieldSchema>();
+           colss.put(colId, cols);
+           colsSb.append(colId).append(",");
+         }
+         sd.setCols(cols);
+       }
+ 
+       // We assume each SD has a unique serde.
+       SerDeInfo serde = new SerDeInfo();
+       SerDeInfo oldSerde = serdes.put(serdeId, serde);
+       if (oldSerde != null) {
+         throw new MetaException("SDs reuse serdes; we don't expect that");
+       }
+       serde.setParameters(new HashMap<String, String>());
+       serde.setName((String)fields[12]);
+       serde.setSerializationLib((String)fields[13]);
+       serdeSb.append(serdeId).append(",");
+       sd.setSerdeInfo(serde);
++
+       Deadline.checkTimeout();
+     }
+     query.closeAll();
+     timingTrace(doTrace, queryText, start, queryTime);
+ 
+     // Now get all the one-to-many things. Start with partitions.
+     queryText = "select \"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + PARTITION_PARAMS + ""
+         + " where \"PART_ID\" in (" + partIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"PART_ID\" asc";
+     loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc<Partition>() {
+       @Override
+       public void apply(Partition t, Object[] fields) {
+         t.putToParameters((String)fields[1], (String)fields[2]);
+       }});
+     // Perform conversion of null map values
+     for (Partition t : partitions.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     queryText = "select \"PART_ID\", \"PART_KEY_VAL\" from " + PARTITION_KEY_VALS + ""
+         + " where \"PART_ID\" in (" + partIds + ")"
+         + " order by \"PART_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(partitions, queryText, 0, new ApplyFunc<Partition>() {
+       @Override
+       public void apply(Partition t, Object[] fields) {
+         t.addToValues((String)fields[1]);
+       }});
+ 
+     // Prepare IN (blah) lists for the following queries. Cut off the final ','s.
+     if (sdSb.length() == 0) {
+       assert serdeSb.length() == 0 && colsSb.length() == 0;
+       return orderedResult; // No SDs, probably a view.
+     }
+ 
+     String sdIds = trimCommaList(sdSb);
+     String serdeIds = trimCommaList(serdeSb);
+     String colIds = trimCommaList(colsSb);
+ 
+     // Get all the stuff for SD. Don't do empty-list check - we expect partitions do have SDs.
+     queryText = "select \"SD_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SD_PARAMS + ""
+         + " where \"SD_ID\" in (" + sdIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"SD_ID\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+       }});
+     // Perform conversion of null map values
+     for (StorageDescriptor t : sds.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     queryText = "select \"SD_ID\", \"COLUMN_NAME\", " + SORT_COLS + ".\"ORDER\""
+         + " from " + SORT_COLS + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         if (fields[2] == null) return;
+         t.addToSortCols(new Order((String)fields[1], extractSqlInt(fields[2])));
+       }});
+ 
+     queryText = "select \"SD_ID\", \"BUCKET_COL_NAME\" from " + BUCKETING_COLS + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+       @Override
+       public void apply(StorageDescriptor t, Object[] fields) {
+         t.addToBucketCols((String)fields[1]);
+       }});
+ 
+     // Skewed columns stuff.
+     queryText = "select \"SD_ID\", \"SKEWED_COL_NAME\" from " + SKEWED_COL_NAMES + ""
+         + " where \"SD_ID\" in (" + sdIds + ")"
+         + " order by \"SD_ID\" asc, \"INTEGER_IDX\" asc";
+     boolean hasSkewedColumns =
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) {
+           if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo());
+           t.getSkewedInfo().addToSkewedColNames((String)fields[1]);
+         }}) > 0;
+ 
+     // Assume we don't need to fetch the rest of the skewed column data if we have no columns.
+     if (hasSkewedColumns) {
+       // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless.
+       queryText =
+             "select " + SKEWED_VALUES + ".\"SD_ID_OID\","
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\","
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" "
+           + "from " + SKEWED_VALUES + " "
+           + "  left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_VALUES + "."
+           + "\"STRING_LIST_ID_EID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" "
+           + "where " + SKEWED_VALUES + ".\"SD_ID_OID\" in (" + sdIds + ") "
+           + "  and " + SKEWED_VALUES + ".\"STRING_LIST_ID_EID\" is not null "
+           + "  and " + SKEWED_VALUES + ".\"INTEGER_IDX\" >= 0 "
+           + "order by " + SKEWED_VALUES + ".\"SD_ID_OID\" asc, " + SKEWED_VALUES + ".\"INTEGER_IDX\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc";
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         private Long currentListId;
+         private List<String> currentList;
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) throws MetaException {
+           if (!t.isSetSkewedInfo()) t.setSkewedInfo(new SkewedInfo());
+           // Note that this is not a typical list accumulator - there's no call to finalize
+           // the last list. Instead we add the list to the SD up front and keep a local
+           // reference to append elements to it.
+           if (fields[1] == null) {
+             currentList = null; // left outer join produced a list with no values
+             currentListId = null;
+             t.getSkewedInfo().addToSkewedColValues(Collections.<String>emptyList());
+           } else {
+             long fieldsListId = extractSqlLong(fields[1]);
+             if (currentListId == null || fieldsListId != currentListId) {
+               currentList = new ArrayList<String>();
+               currentListId = fieldsListId;
+               t.getSkewedInfo().addToSkewedColValues(currentList);
+             }
+             currentList.add((String)fields[2]);
+           }
+         }});
+ 
+       // We are skipping the SKEWED_STRING_LIST table here, as it seems to be totally useless.
+       queryText =
+             "select " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\","
+           + " " + SKEWED_STRING_LIST_VALUES + ".STRING_LIST_ID,"
+           + " " + SKEWED_COL_VALUE_LOC_MAP + ".\"LOCATION\","
+           + " " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_VALUE\" "
+           + "from " + SKEWED_COL_VALUE_LOC_MAP + ""
+           + "  left outer join " + SKEWED_STRING_LIST_VALUES + " on " + SKEWED_COL_VALUE_LOC_MAP + "."
+           + "\"STRING_LIST_ID_KID\" = " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" "
+           + "where " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" in (" + sdIds + ")"
+           + "  and " + SKEWED_COL_VALUE_LOC_MAP + ".\"STRING_LIST_ID_KID\" is not null "
+           + "order by " + SKEWED_COL_VALUE_LOC_MAP + ".\"SD_ID\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"STRING_LIST_ID\" asc,"
+           + "  " + SKEWED_STRING_LIST_VALUES + ".\"INTEGER_IDX\" asc";
+ 
+       loopJoinOrderedResult(sds, queryText, 0, new ApplyFunc<StorageDescriptor>() {
+         private Long currentListId;
+         private List<String> currentList;
+         @Override
+         public void apply(StorageDescriptor t, Object[] fields) throws MetaException {
+           if (!t.isSetSkewedInfo()) {
+             SkewedInfo skewedInfo = new SkewedInfo();
+             skewedInfo.setSkewedColValueLocationMaps(new HashMap<List<String>, String>());
+             t.setSkewedInfo(skewedInfo);
+           }
+           Map<List<String>, String> skewMap = t.getSkewedInfo().getSkewedColValueLocationMaps();
+           // Note that this is not a typical list accumulator - there's no call to finalize
+           // the last list. Instead we add the list to the SD up front and keep a local
+           // reference to append elements to it.
+           if (fields[1] == null) {
+             currentList = new ArrayList<String>(); // left outer join produced a list with no values
+             currentListId = null;
+           } else {
+             long fieldsListId = extractSqlLong(fields[1]);
+             if (currentListId == null || fieldsListId != currentListId) {
+               currentList = new ArrayList<String>();
+               currentListId = fieldsListId;
+             } else {
+               skewMap.remove(currentList); // value based compare.. remove first
+             }
+             currentList.add((String)fields[3]);
+           }
+           skewMap.put(currentList, (String)fields[2]);
+         }});
+     } // if (hasSkewedColumns)
+ 
+     // Get FieldSchema stuff if any.
+     if (!colss.isEmpty()) {
+       // We are skipping the CDS table here, as it seems to be totally useless.
+       queryText = "select \"CD_ID\", \"COMMENT\", \"COLUMN_NAME\", \"TYPE_NAME\""
+           + " from " + COLUMNS_V2 + " where \"CD_ID\" in (" + colIds + ")"
+           + " order by \"CD_ID\" asc, \"INTEGER_IDX\" asc";
+       loopJoinOrderedResult(colss, queryText, 0, new ApplyFunc<List<FieldSchema>>() {
+         @Override
+         public void apply(List<FieldSchema> t, Object[] fields) {
+           t.add(new FieldSchema((String)fields[2], extractSqlClob(fields[3]), (String)fields[1]));
+         }});
+     }
+ 
+     // Finally, get all the stuff for serdes - just the params.
+     queryText = "select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from " + SERDE_PARAMS + ""
+         + " where \"SERDE_ID\" in (" + serdeIds + ") and \"PARAM_KEY\" is not null"
+         + " order by \"SERDE_ID\" asc";
+     loopJoinOrderedResult(serdes, queryText, 0, new ApplyFunc<SerDeInfo>() {
+       @Override
+       public void apply(SerDeInfo t, Object[] fields) {
+         t.putToParameters((String)fields[1], extractSqlClob(fields[2]));
+       }});
+     // Perform conversion of null map values
+     for (SerDeInfo t : serdes.values()) {
+       t.setParameters(MetaStoreUtils.trimMapNulls(t.getParameters(), convertMapNullsToEmptyStrings));
+     }
+ 
+     return orderedResult;
+   }
+ 
+   public int getNumPartitionsViaSqlFilter(SqlFilterForPushdown filter) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     String catName = filter.table.getCatName().toLowerCase();
+     String dbName = filter.table.getDbName().toLowerCase();
+     String tblName = filter.table.getTableName().toLowerCase();
+ 
+     // Get number of partitions by doing count on PART_ID.
+     String queryText = "select count(" + PARTITIONS + ".\"PART_ID\") from " + PARTITIONS + ""
+       + "  inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" "
+       + "    and " + TBLS + ".\"TBL_NAME\" = ? "
+       + "  inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" "
+       + "     and " + DBS + ".\"NAME\" = ? "
+       + join(filter.joins, ' ')
+       + " where " + DBS + ".\"CTLG_NAME\" = ? "
+       + (filter.filter == null || filter.filter.trim().isEmpty() ? "" : (" and " + filter.filter));
+ 
+     Object[] params = new Object[filter.params.size() + 3];
+     params[0] = tblName;
+     params[1] = dbName;
+     params[2] = catName;
+     for (int i = 0; i < filter.params.size(); ++i) {
+       params[i + 3] = filter.params.get(i);
+     }
+ 
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     query.setUnique(true);
+     int sqlResult = extractSqlInt(query.executeWithArray(params));
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     timingTrace(doTrace, queryText, start, queryTime);
+     return sqlResult;
+   }
+ 
+ 
+   private void timingTrace(boolean doTrace, String queryText, long start, long queryTime) {
+     if (!doTrace) return;
+     LOG.debug("Direct SQL query in " + (queryTime - start) / 1000000.0 + "ms + " +
+         (System.nanoTime() - queryTime) / 1000000.0 + "ms, the query is [" + queryText + "]");
+   }
+ 
+   static Long extractSqlLong(Object obj) throws MetaException {
+     if (obj == null) return null;
+     if (!(obj instanceof Number)) {
+       throw new MetaException("Expected numeric type but got " + obj.getClass().getName());
+     }
+     return ((Number)obj).longValue();
+   }
+ 
+   /**
+    * Convert a boolean value returned from the RDBMS to a Java Boolean object.
+    * MySQL has booleans, but e.g. Derby uses 'Y'/'N' mapping.
+    *
+    * @param value
+    *          column value from the database
+    * @return The Boolean value of the database column value, null if the column
+    *         value is null
+    * @throws MetaException
+    *           if the column value cannot be converted into a Boolean object
+    */
+   private static Boolean extractSqlBoolean(Object value) throws MetaException {
+     if (value == null) {
+       return null;
+     }
+     if (value instanceof Boolean) {
+       return (Boolean)value;
+     }
+     if (value instanceof String) {
+       try {
+         return BooleanUtils.toBooleanObject((String) value, "Y", "N", null);
+       } catch (IllegalArgumentException iae) {
+         // NOOP
+       }
+     }
+     throw new MetaException("Cannot extract boolean from column value " + value);
+   }
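
Concretely, the conversion above behaves as follows (illustrative values):

    extractSqlBoolean(Boolean.TRUE)  -> Boolean.TRUE   // native boolean column (e.g. MySQL)
    extractSqlBoolean("Y")           -> Boolean.TRUE   // Derby-style 'Y'/'N' mapping
    extractSqlBoolean("N")           -> Boolean.FALSE
    extractSqlBoolean(null)          -> null
    extractSqlBoolean("maybe")       -> throws MetaException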
+ 
+   private int extractSqlInt(Object field) {
+     return ((Number)field).intValue();
+   }
+ 
+   private String extractSqlString(Object value) {
+     if (value == null) return null;
+     return value.toString();
+   }
+ 
+   static Double extractSqlDouble(Object obj) throws MetaException {
+     if (obj == null)
+       return null;
+     if (!(obj instanceof Number)) {
+       throw new MetaException("Expected numeric type but got " + obj.getClass().getName());
+     }
+     return ((Number) obj).doubleValue();
+   }
+ 
+   private String extractSqlClob(Object value) {
+     if (value == null) return null;
+     try {
+       if (value instanceof Clob) {
+         // we trim the Clob value to a max length an int can hold
+         int maxLength = (((Clob)value).length() < Integer.MAX_VALUE - 2) ? (int)((Clob)value).length() : Integer.MAX_VALUE - 2;
+         return ((Clob)value).getSubString(1L, maxLength);
+       } else {
+         return value.toString();
+       }
+     } catch (SQLException sqle) {
+       return null;
+     }
+   }
+ 
+   static byte[] extractSqlBlob(Object value) throws MetaException {
+     if (value == null)
+       return null;
+     if (value instanceof Blob) {
+       //derby, oracle
+       try {
+         // getBytes function says: pos the ordinal position of the first byte in
+         // the BLOB value to be extracted; the first byte is at position 1
+         return ((Blob) value).getBytes(1, (int) ((Blob) value).length());
+       } catch (SQLException e) {
+         throw new MetaException("Encounter error while processing blob.");
+       }
+     }
+     else if (value instanceof byte[]) {
+       // mysql, postgres, sql server
+       return (byte[]) value;
+     }
+     else {
+       // this may happen when enablebitvector is false
+       LOG.debug("Expected blob type but got " + value.getClass().getName());
+       return null;
+     }
+   }
+ 
+   /**
+    * Helper method for preparing a "SOMETHING_ID in (...)" id list to use in subsequent queries.
+    * @param objectIds the objectId collection
+    * @return The concatenated list
+    * @throws MetaException If the list contains wrong data
+    */
+   private static String getIdListForIn(List<Object> objectIds) throws MetaException {
+     return objectIds.stream()
+                .map(i -> i.toString())
+                .collect(Collectors.joining(","));
+   }
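
For example (illustrative), the id list produced above is a plain comma-separated string that gets spliced into the "in (...)" clauses built by this class:

    getIdListForIn(Arrays.asList(101L, 102L, 103L))  // -> "101,102,103"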
+ 
+   private static String trimCommaList(StringBuilder sb) {
+     if (sb.length() > 0) {
+       sb.setLength(sb.length() - 1);
+     }
+     return sb.toString();
+   }
+ 
+   private abstract class ApplyFunc<Target> {
+     public abstract void apply(Target t, Object[] fields) throws MetaException;
+   }
+ 
+   /**
+    * Merges the result of a PM SQL query into a tree of objects.
+    * Essentially it's an object join. DN could do this for us, but it issues queries
+    * separately for every object, which is suboptimal.
+    * @param tree The object tree, by ID.
+    * @param queryText The query text.
+    * @param keyIndex Index of the Long column corresponding to the map ID in query result rows.
+    * @param func The function that is called on each (object,row) pair with the same id.
+    * @return the count of results returned from the query.
+    */
+   private <T> int loopJoinOrderedResult(TreeMap<Long, T> tree,
+       String queryText, int keyIndex, ApplyFunc<T> func) throws MetaException {
+     boolean doTrace = LOG.isDebugEnabled();
+     long start = doTrace ? System.nanoTime() : 0;
+     Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+     Object result = query.execute();
+     long queryTime = doTrace ? System.nanoTime() : 0;
+     if (result == null) {
+       query.closeAll();
+       return 0;
+     }
+     List<Object[]> list = ensureList(result);
+     Iterator<Object[]> iter = list.iterator();
+     Object[] fields = null;
+     for (Map.Entry<Long, T> entry : tree.entrySet()) {
+       if (fields == null && !iter.hasNext()) break;
+       long id = entry.getKey();
+       while (fields != null || iter.hasNext()) {
+         if (fields == null) {
+           fields = iter.next();
+         }
+         long nestedId = extractSqlLong(fields[keyIndex]);
+         if (nestedId < id) throw new MetaException("Found entries for unknown ID " + nestedId);
+         if (nestedId > id) break; // fields belong to one of the next entries
+         func.apply(entry.getValue(), fields);
+         fields = null;
+       }
+       Deadline.checkTimeout();
+     }
+     int rv = list.size();
+     query.closeAll();
+     timingTrace(doTrace, queryText, start, queryTime);
+     return rv;
+   }
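Below is a minimal, standalone sketch of the merge-join pattern used by loopJoinOrderedResult, assuming the SQL result set is ordered by the same ID that keys the TreeMap; the row layout, the list-of-strings target, and the "apply" step are simplified placeholders rather than the metastore's actual types.

    import java.util.*;

    public class LoopJoinSketch {
      // Applies ordered rows to a TreeMap of targets keyed by the same ID (a simplified ApplyFunc).
      static int loopJoin(TreeMap<Long, List<String>> tree, List<Object[]> rows, int keyIndex) {
        Iterator<Object[]> iter = rows.iterator();
        Object[] fields = null;
        for (Map.Entry<Long, List<String>> entry : tree.entrySet()) {
          if (fields == null && !iter.hasNext()) break;
          long id = entry.getKey();
          while (fields != null || iter.hasNext()) {
            if (fields == null) fields = iter.next();
            long rowId = ((Number) fields[keyIndex]).longValue();
            if (rowId < id) throw new IllegalStateException("Row for unknown ID " + rowId);
            if (rowId > id) break;                    // row belongs to a later entry
            entry.getValue().add((String) fields[1]); // "apply" the row to the target object
            fields = null;
          }
        }
        return rows.size();
      }

      public static void main(String[] args) {
        TreeMap<Long, List<String>> targets = new TreeMap<>();
        targets.put(1L, new ArrayList<>());
        targets.put(2L, new ArrayList<>());
        List<Object[]> rows = Arrays.asList(
            new Object[] {1L, "a"}, new Object[] {1L, "b"}, new Object[] {2L, "c"});
        loopJoin(targets, rows, 0);
        System.out.println(targets); // {1=[a, b], 2=[c]}
      }
    }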
+ 
+   private static class PartitionFilterGenerator extends TreeVisitor {
+     private final Table table;
+     private final FilterBuilder filterBuffer;
+     private final List<Object> params;
+     private final List<String> joins;
+     private final boolean dbHasJoinCastBug;
+     private final String defaultPartName;
+     private final DatabaseProduct dbType;
+     private final String PARTITION_KEY_VALS, PARTITIONS, DBS, TBLS;
+ 
+     private PartitionFilterGenerator(Table table, List<Object> params, List<String> joins,
+         boolean dbHasJoinCastBug, String defaultPartName, DatabaseProduct dbType, String schema) {
+       this.table = table;
+       this.params = params;
+       this.joins = joins;
+       this.dbHasJoinCastBug = dbHasJoinCastBug;
+       this.filterBuffer = new FilterBuilder(false);
+       this.defaultPartName = defaultPartName;
+       this.dbType = dbType;
+       this.PARTITION_KEY_VALS = getFullyQualifiedName(schema, "PARTITION_KEY_VALS");
+       this.PARTITIONS = getFullyQualifiedName(schema, "PARTITIONS");
+       this.DBS = getFullyQualifiedName(schema, "DBS");
+       this.TBLS = getFullyQualifiedName(schema, "TBLS");
+     }
+ 
+     /**
+      * Generate the ANSI SQL92 filter for the given expression tree
+      * @param table the table being queried
+      * @param params the ordered parameters for the resulting expression
+      * @param joins the joins necessary for the resulting expression
+      * @return the string representation of the expression tree
+      */
+     private static String generateSqlFilter(Table table, ExpressionTree tree, List<Object> params,
+         List<String> joins, boolean dbHasJoinCastBug, String defaultPartName,
+         DatabaseProduct dbType, String schema) throws MetaException {
+       assert table != null;
+       if (tree == null) {
+         // consistent with other APIs like makeExpressionTree, null is returned to indicate that
+         // the filter could not be pushed down due to a parsing issue, etc.
+         return null;
+       }
+       if (tree.getRoot() == null) {
+         return "";
+       }
+       PartitionFilterGenerator visitor = new PartitionFilterGenerator(
+           table, params, joins, dbHasJoinCastBug, defaultPartName, dbType, schema);
+       tree.accept(visitor);
+       if (visitor.filterBuffer.hasError()) {
+         LOG.info("Unable to push down SQL filter: " + visitor.filterBuffer.getErrorMessage());
+         return null;
+       }
+ 
+       // Some joins might be null (see visit(LeafNode)); clean them up.
+       for (int i = 0; i < joins.size(); ++i) {
+         if (joins.get(i) != null) continue;
+         joins.remove(i--);
+       }
+       return "(" + visitor.filterBuffer.getFilter() + ")";
+     }
+ 
+     @Override
+     protected void beginTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append(" (");
+     }
+ 
+     @Override
+     protected void midTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append((node.getAndOr() == LogicalOperator.AND) ? " and " : " or ");
+     }
+ 
+     @Override
+     protected void endTreeNode(TreeNode node) throws MetaException {
+       filterBuffer.append(") ");
+     }
+ 
+     @Override
+     protected boolean shouldStop() {
+       return filterBuffer.hasError();
+     }
+ 
+     private enum FilterType {
+       Integral,
+       String,
+       Date,
+ 
+       Invalid;
+ 
+       static FilterType fromType(String colTypeStr) {
+         if (colTypeStr.equals(ColumnType.STRING_TYPE_NAME)) {
+           return FilterType.String;
+         } else if (colTypeStr.equals(ColumnType.DATE_TYPE_NAME)) {
+           return FilterType.Date;
+         } else if (ColumnType.IntegralTypes.contains(colTypeStr)) {
+           return FilterType.Integral;
+         }
+         return FilterType.Invalid;
+       }
+ 
+       public static FilterType fromClass(Object value) {
+         if (value instanceof String) {
+           return FilterType.String;
+         } else if (value instanceof Long) {
+           return FilterType.Integral;
+         } else if (value instanceof java.sql.Date) {
+           return FilterType.Date;
+         }
+         return FilterType.Invalid;
+       }
+     }
+ 
+     @Override
+     public void visit(LeafNode node) throws MetaException {
+       if (node.operator == Operator.LIKE) {
+         filterBuffer.setError("LIKE is not supported for SQL filter pushdown");
+         return;
+       }
+       int partColCount = table.getPartitionKeys().size();
+       int partColIndex = node.getPartColIndexForFilter(table, filterBuffer);
+       if (filterBuffer.hasError()) return;
+ 
+       // We skipped 'like'; other ops should all work as long as the types are right.
+       String colTypeStr = table.getPartitionKeys().get(partColIndex).getType();
+       FilterType colType = FilterType.fromType(colTypeStr);
+       if (colType == FilterType.Invalid) {
+         filterBuffer.setError("Filter pushdown not supported for type " + colTypeStr);
+         return;
+       }
+       FilterType valType = FilterType.fromClass(node.value);
+       Object nodeValue = node.value;
+       if (valType == FilterType.Invalid) {
+         filterBuffer.setError("Filter pushdown not supported for value " + node.value.getClass());
+         return;
+       }
+ 
+       // If Filter.g ever does date parsing for quoted strings, we'd need to verify there's no
+       // type mismatch when a string column is filtered by a string that looks like a date.
+       if (colType == FilterType.Date && valType == FilterType.String) {
+         // Filter.g cannot parse a quoted date; try to parse date here too.
+         try {
+           nodeValue = MetaStoreUtils.PARTITION_DATE_FORMAT.get().parse((String)nodeValue);
+           valType = FilterType.Date;
+         } catch (ParseException pe) { // do nothing, handled below - types will mismatch
+         }
+       }
+ 
+       // We format it so we are sure we are getting the right value
+       if (valType == FilterType.Date) {
+         // Format
+         nodeValue = MetaStoreUtils.PARTITION_DATE_FORMAT.get().format(nodeValue);
+       }
+ 
+       if (colType != valType) {
+         // It's not clear how filtering for e.g. "stringCol > 5" should work (which side is
+         // to be coerced?). Let the expression evaluation sort this one out, not metastore.
+         filterBuffer.setError("Cannot push down filter for "
+             + colTypeStr + " column and value " + nodeValue.getClass());
+         return;
+       }
+ 
+       if (joins.isEmpty()) {
+         // There's a fixed number of partition cols that we might have filters on. To avoid
+         // joining multiple times for one column (if there are several filters on it), we will
+         // keep numCols elements in the list, one for each column; we will fill it with nulls,
+         // put each join at a corresponding index when necessary, and remove nulls in the end.
+         for (int i = 0; i < partColCount; ++i) {
+           joins.add(null);
+         }
+       }
+       if (joins.get(partColIndex) == null) {
+         joins.set(partColIndex, "inner join " + PARTITION_KEY_VALS + " \"FILTER" + partColIndex
+             + "\" on \"FILTER"  + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+             + " and \"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex);
+       }
+ 
+       // Build the filter and add parameters linearly; we are traversing leaf nodes LTR.
+       String tableValue = "\"FILTER" + partColIndex + "\".\"PART_KEY_VAL\"";
+ 
+       if (node.isReverseOrder) {
+         params.add(nodeValue);
+       }
+       String tableColumn = tableValue;
+       if (colType != FilterType.String) {
+         // The underlying database field is varchar, so we need to cast before comparing numbers or dates.
+         if (colType == FilterType.Integral) {
+           tableValue = "cast(" + tableValue + " as decimal(21,0))";
+         } else if (colType == FilterType.Date) {
+           if (dbType == DatabaseProduct.ORACLE) {
+             // Oracle requires special treatment... as usual.
+             tableValue = "TO_DATE(" + tableValue + ", 'YYYY-MM-DD')";
+           } else {
+             tableValue = "cast(" + tableValue + " as date)";
+           }
+         }
+ 
+         // Workaround for HIVE_DEFAULT_PARTITION - ignore it like JDO does, for now.
+         String tableValue0 = tableValue;
+         tableValue = "(case when " + tableColumn + " <> ?";
+         params.add(defaultPartName);
+ 
+         if (dbHasJoinCastBug) {
+           // This is a workaround for DERBY-6358 and an Oracle bug; it is pretty horrible.
+           tableValue += (" and " + TBLS + ".\"TBL_NAME\" = ? and " + DBS + ".\"NAME\" = ? and "
+               + DBS + ".\"CTLG_NAME\" = ? and "
+               + "\"FILTER" + partColIndex + "\".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\" and "
+                 + "\"FILTER" + partColIndex + "\".\"INTEGER_IDX\" = " + partColIndex);
+           params.add(table.getTableName().toLowerCase());
+           params.add(table.getDbName().toLowerCase());
+           params.add(table.getCatName().toLowerCase());
+         }
+         tableValue += " then " + tableValue0 + " else null end)";
+       }
+       if (!node.isReverseOrder) {
+         params.add(nodeValue);
+       }
+ 
+       filterBuffer.append(node.isReverseOrder
+           ? "(? " + node.operator.getSqlOp() + " " + tableValue + ")"
+           : "(" + tableValue + " " + node.operator.getSqlOp() + " ?)");
+     }
+   }
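For illustration, a rough sketch of the join and predicate fragments this generator produces for a simple equality filter such as ds = '2018-01-01' on a string partition column at index 0; the table-name qualification, the non-string casts, and the dbHasJoinCastBug workaround shown above are omitted, so the exact text in practice differs.

    public class FilterShapeSketch {
      public static void main(String[] args) {
        // Roughly what visit(LeafNode) emits for "ds = '2018-01-01'" on a string
        // partition column at index 0 (schema qualification omitted):
        String join =
            "inner join PARTITION_KEY_VALS \"FILTER0\""
          + " on \"FILTER0\".\"PART_ID\" = PARTITIONS.\"PART_ID\""
          + " and \"FILTER0\".\"INTEGER_IDX\" = 0";
        String filter = "(\"FILTER0\".\"PART_KEY_VAL\" = ?)"; // params: ["2018-01-01"]
        System.out.println(join + " ... where " + filter);
      }
    }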
+ 
+   /**
+    * Retrieve the column statistics for the specified columns of the table. NULL
+    * is returned if the columns are not provided.
+    * @param catName     the catalog name of the table
+    * @param dbName      the database name of the table
+    * @param tableName   the table name
+    * @param colNames    the list of the column names
+    * @return            the column statistics for the specified columns
+    * @throws MetaException error accessing the RDBMS
+    */
+   public ColumnStatistics getTableStats(final String catName, final String dbName,
+                                         final String tableName, List<String> colNames,
+                                         boolean enableBitVector) throws MetaException {
+     if (colNames == null || colNames.isEmpty()) {
+       return null;
+     }
+     final boolean doTrace = LOG.isDebugEnabled();
+     final String queryText0 = "select " + getStatsList(enableBitVector) + " from " + TAB_COL_STATS
+           + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in (";
+     Batchable<String, Object[]> b = new Batchable<String, Object[]>() {
+       @Override
+       public List<Object[]> run(List<String> input) throws MetaException {
+         String queryText = queryText0 + makeParams(input.size()) + ")";
+         Object[] params = new Object[input.size() + 3];
+         params[0] = catName;
+         params[1] = dbName;
+         params[2] = tableName;
+         for (int i = 0; i < input.size(); ++i) {
+           params[i + 3] = input.get(i);
+         }
+         long start = doTrace ? System.nanoTime() : 0;
+         Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         Object qResult = executeWithArray(query, params, queryText);
+         timingTrace(doTrace, queryText0 + "...)", start, (doTrace ? System.nanoTime() : 0));
+         if (qResult == null) {
+           query.closeAll();
+           return null;
+         }
+         addQueryAfterUse(query);
+         return ensureList(qResult);
+       }
+     };
+     List<Object[]> list = Batchable.runBatched(batchSize, colNames, b);
+     if (list.isEmpty()) {
+       return null;
+     }
+     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tableName);
+     csd.setCatName(catName);
+     ColumnStatistics result = makeColumnStats(list, csd, 0);
+     b.closeAllQueries();
+     return result;
+   }
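A simplified, standalone sketch of the batching pattern used above, assuming makeParams produces n comma-separated '?' placeholders and that Batchable.runBatched splits its input into fixed-size chunks and concatenates the per-chunk results; the helpers below are stand-ins for the metastore's own, not their actual implementations.

    import java.util.*;
    import java.util.function.Function;

    public class BatchedInClauseSketch {
      // Stand-in for makeParams(n): n comma-separated '?' placeholders.
      static String makeParams(int n) {
        return String.join(",", Collections.nCopies(n, "?"));
      }

      // Stand-in for Batchable.runBatched: apply the function to fixed-size chunks.
      static <I, R> List<R> runBatched(int batchSize, List<I> input, Function<List<I>, List<R>> run) {
        List<R> result = new ArrayList<>();
        for (int i = 0; i < input.size(); i += batchSize) {
          result.addAll(run.apply(input.subList(i, Math.min(i + batchSize, input.size()))));
        }
        return result;
      }

      public static void main(String[] args) {
        List<String> colNames = Arrays.asList("a", "b", "c", "d", "e");
        List<String> queries = runBatched(2, colNames, batch ->
            Collections.singletonList(
                "select ... where \"COLUMN_NAME\" in (" + makeParams(batch.size()) + ")"));
        queries.forEach(System.out::println); // three batches: (?,?), (?,?), (?)
      }
    }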
+ 
+   public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName,
+       List<String> partNames, List<String> colNames, boolean useDensityFunctionForNDVEstimation,
+       double ndvTuner, boolean enableBitVector) throws MetaException {
+     if (colNames.isEmpty() || partNames.isEmpty()) {
+       LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval");
+       return new AggrStats(Collections.<ColumnStatisticsObj>emptyList(), 0); // Nothing to aggregate
+     }
+     long partsFound = 0;
+     List<ColumnStatisticsObj> colStatsList;
+     // Try to read from the cache first
+     if (isAggregateStatsCacheEnabled
+         && (partNames.size() < aggrStatsCache.getMaxPartsPerCacheNode())) {
+       AggrColStats colStatsAggrCached;
+       List<ColumnStatisticsObj> colStatsAggrFromDB;
+       int maxPartsPerCacheNode = aggrStatsCache.getMaxPartsPerCacheNode();
+       double fpp = aggrStatsCache.getFalsePositiveProbability();
+       colStatsList = new ArrayList<ColumnStatisticsObj>();
+       // Bloom filter for the new node that we will eventually add to the cache
+       BloomFilter bloomFilter = createPartsBloomFilter(maxPartsPerCacheNode, fpp, partNames);
+       boolean computePartsFound = true;
+       for (String colName : colNames) {
+         // Check the cache first
+         colStatsAggrCached = aggrStatsCache.get(catName, dbName, tableName, colName, partNames);
+         if (colStatsAggrCached != null) {
+           colStatsList.add(colStatsAggrCached.getColStats());
+           partsFound = colStatsAggrCached.getNumPartsCached();
+         } else {
+           if (computePartsFound) {
+             partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames);
+             computePartsFound = false;
+           }
+           List<String> colNamesForDB = new ArrayList<>();
+           colNamesForDB.add(colName);
+           // Read aggregated stats for one column
+           colStatsAggrFromDB =
+               columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNamesForDB,
+                   partsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+           if (!colStatsAggrFromDB.isEmpty()) {
+             ColumnStatisticsObj colStatsAggr = colStatsAggrFromDB.get(0);
+             colStatsList.add(colStatsAggr);
+             // Update the cache to add this new aggregate node
+             aggrStatsCache.add(catName, dbName, tableName, colName, partsFound, colStatsAggr, bloomFilter);
+           }
+         }
+       }
+     } else {
+       partsFound = partsFoundForPartitions(catName, dbName, tableName, partNames, colNames);
+       colStatsList =
+           columnStatisticsObjForPartitions(catName, dbName, tableName, partNames, colNames, partsFound,
+               useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+     }
+     LOG.info("useDensityFunctionForNDVEstimation = " + useDensityFunctionForNDVEstimation
+         + "\npartsFound = " + partsFound + "\nColumnStatisticsObj = "
+         + Arrays.toString(colStatsList.toArray()));
+     return new AggrStats(colStatsList, partsFound);
+   }
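A toy sketch of the per-column cache-or-compute flow above, with a plain Map standing in for the aggregate-stats cache and strings standing in for ColumnStatisticsObj; the partsFound bookkeeping and the bloom filter are omitted for brevity.

    import java.util.*;

    public class CacheOrComputeSketch {
      public static void main(String[] args) {
        Map<String, String> aggrCache = new HashMap<>();   // colName -> cached aggregate
        aggrCache.put("id", "cached-aggr(id)");
        List<String> colNames = Arrays.asList("id", "name");
        List<String> colStatsList = new ArrayList<>();

        for (String colName : colNames) {
          String cached = aggrCache.get(colName);          // check the cache first
          if (cached != null) {
            colStatsList.add(cached);
          } else {
            String fromDb = "db-aggr(" + colName + ")";    // aggregate from the database instead
            colStatsList.add(fromDb);
            aggrCache.put(colName, fromDb);                // add the new aggregate node to the cache
          }
        }
        System.out.println(colStatsList); // [cached-aggr(id), db-aggr(name)]
      }
    }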
+ 
+   private BloomFilter createPartsBloomFilter(int maxPartsPerCacheNode, double fpp,
+       List<String> partNames) {
+     BloomFilter bloomFilter = new BloomFilter(maxPartsPerCacheNode, fpp);
+     for (String partName : partNames) {
+       bloomFilter.add(partName.getBytes());
+     }
+     return bloomFilter;
+   }
+ 
+   private long partsFoundForPartitions(
+       final String catName, final String dbName, final String tableName,
+       final List<String> partNames, List<String> colNames) throws MetaException {
+     assert !colNames.isEmpty() && !partNames.isEmpty();
+     final boolean doTrace = LOG.isDebugEnabled();
+     final String queryText0  = "select count(\"COLUMN_NAME\") from " + PART_COL_STATS + ""
+         + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+         + " and \"COLUMN_NAME\" in (%1$s) and \"PARTITION_NAME\" in (%2$s)"
+         + " group by \"PARTITION_NAME\"";
+     List<Long> allCounts = Batchable.runBatched(batchSize, colNames, new Batchable<String, Long>() {
+       @Override
+       public List<Long> run(final List<String> inputColName) throws MetaException {
+         return Batchable.runBatched(batchSize, partNames, new Batchable<String, Long>() {
+           @Override
+           public List<Long> run(List<String> inputPartNames) throws MetaException {
+             long partsFound = 0;
+             String queryText = String.format(queryText0,
+                 makeParams(inputColName.size()), makeParams(inputPartNames.size()));
+             long start = doTrace ? System.nanoTime() : 0;
+             Query query = pm.newQuery("javax.jdo.query.SQL", queryText);
+             try {
+               Object qResult = executeWithArray(query, prepareParams(
+                   catName, dbName, tableName, inputPartNames, inputColName), queryText);
+               long end = doTrace ? System.nanoTime() : 0;
+               timingTrace(doTrace, queryText, start, end);
+               ForwardQueryResult<?> fqr = (ForwardQueryResult<?>) qResult;
+               Iterator<?> iter = fqr.iterator();
+               while (iter.hasNext()) {
+                 if (extractSqlLong(iter.next()) == inputColName.size()) {
+                   partsFound++;
+                 }
+               }
+               return Lists.<Long>newArrayList(partsFound);
+             } finally {
+               query.closeAll();
+             }
+           }
+         });
+       }
+     });
+     long partsFound = 0;
+     for (Long val : allCounts) {
+       partsFound += val;
+     }
+     return partsFound;
+   }
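A small worked sketch of the counting rule above: the group-by query yields, per partition, how many of the requested columns have a stats row, and a partition counts as found only when that number equals the number of requested columns. The counts below are made up for illustration.

    import java.util.*;

    public class PartsFoundSketch {
      public static void main(String[] args) {
        int requestedCols = 3;
        // Per-partition counts of requested columns that have stats rows,
        // i.e. the result of the "group by PARTITION_NAME" query above.
        Map<String, Long> statsRowsPerPartition = new LinkedHashMap<>();
        statsRowsPerPartition.put("ds=2018-01-01", 3L); // stats exist for all 3 columns
        statsRowsPerPartition.put("ds=2018-01-02", 2L); // one column is missing stats

        long partsFound = statsRowsPerPartition.values().stream()
            .filter(c -> c == requestedCols)
            .count();
        System.out.println(partsFound); // 1
      }
    }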
+ 
+   private List<ColumnStatisticsObj> columnStatisticsObjForPartitions(
+       final String catName, final String dbName,
+     final String tableName, final List<String> partNames, List<String> colNames, long partsFound,
+     final boolean useDensityFunctionForNDVEstimation, final double ndvTuner, final boolean enableBitVector) throws MetaException {
+     final boolean areAllPartsFound = (partsFound == partNames.size());
+     return Batchable.runBatched(batchSize, colNames, new Batchable<String, ColumnStatisticsObj>() {
+       @Override
+       public List<ColumnStatisticsObj> run(final List<String> inputColNames) throws MetaException {
+         return Batchable.runBatched(batchSize, partNames, new Batchable<String, ColumnStatisticsObj>() {
+           @Override
+           public List<ColumnStatisticsObj> run(List<String> inputPartNames) throws MetaException {
+             return columnStatisticsObjForPartitionsBatch(catName, dbName, tableName, inputPartNames,
+                 inputColNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner, enableBitVector);
+           }
+         });
+       }
+     });
+   }
+ 
+   public List<ColStatsObjWithSourceInfo> getColStatsForAllTablePartitions(String catName, String dbName,
+       boolean enableBitVector) throws MetaException {
+     String queryText = "select \"TABLE_NAME\", \"PARTITION_NAME\", " + getStatsList(enableBitVector)
+         + " from " + " " + PART_COL_STATS + " where \"DB_NAME\" = ? and \"CAT_NAME\" = ?";
+     long start = 0;
+     long end = 0;
+     Query query = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     Object qResult = null;
+     start = doTrace ? System.nanoTime() : 0;
+     List<ColStatsObjWithSourceInfo> colStatsForDB = new ArrayList<ColStatsObjWithSourceInfo>();
+     try {
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, new Object[] { dbName, catName }, queryText);
+       if (qResult == null) {
+         query.closeAll();
+         return colStatsForDB;
+       }
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       List<Object[]> list = ensureList(qResult);
+       for (Object[] row : list) {
+         String tblName = (String) row[0];
+         String partName = (String) row[1];
+         ColumnStatisticsObj colStatObj = prepareCSObj(row, 2);
+         colStatsForDB.add(new ColStatsObjWithSourceInfo(colStatObj, catName, dbName, tblName, partName));
+         Deadline.checkTimeout();
+       }
+     } finally {
+       query.closeAll();
+     }
+     return colStatsForDB;
+   }
+ 
+   /** Should be called with the list short enough to not trip up Oracle/etc. */
+   private List<ColumnStatisticsObj> columnStatisticsObjForPartitionsBatch(String catName, String dbName,
+       String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner, boolean enableBitVector)
+       throws MetaException {
+     if (enableBitVector) {
+       return aggrStatsUseJava(catName, dbName, tableName, partNames, colNames, areAllPartsFound,
+           useDensityFunctionForNDVEstimation, ndvTuner);
+     } else {
+       return aggrStatsUseDB(catName, dbName, tableName, partNames, colNames, areAllPartsFound,
+           useDensityFunctionForNDVEstimation, ndvTuner);
+     }
+   }
+ 
+   private List<ColumnStatisticsObj> aggrStatsUseJava(String catName, String dbName, String tableName,
+       List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+     // 1. get all the stats for colNames in partNames;
+     List<ColumnStatistics> partStats =
+         getPartitionStats(catName, dbName, tableName, partNames, colNames, true);
+     // 2. use util function to aggr stats
+     return MetaStoreUtils.aggrPartitionStats(partStats, catName, dbName, tableName, partNames, colNames,
+         areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner);
+   }
+ 
+   private List<ColumnStatisticsObj> aggrStatsUseDB(String catName, String dbName,
+       String tableName, List<String> partNames, List<String> colNames, boolean areAllPartsFound,
+       boolean useDensityFunctionForNDVEstimation, double ndvTuner) throws MetaException {
+     // TODO: all the extrapolation logic should be moved out of this class,
+     // only mechanical data retrieval should remain here.
+     String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", "
+         + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), "
+         + "min(cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal)), max(cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)), "
+         + "sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), "
+         + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), "
+         // The following data is used to compute a partitioned table's NDV based
+         // on partitions' NDV when useDensityFunctionForNDVEstimation = true. Global NDVs cannot be
+         // accurately derived from partition NDVs, because the domains of a column's values in two
+         // partitions can overlap. If there is no overlap then the global NDV is just the sum
+         // of the partition NDVs (UpperBound). But if there is some overlap then the
+         // global NDV can be anywhere between the sum of the partition NDVs (no overlap)
+         // and the largest single partition NDV (the domain of the column's values in all other
+         // partitions is a subset of the domain in one of the partitions)
+         // (LowerBound). But under a uniform distribution, we can roughly estimate the global
+         // NDV by leveraging the min/max values.
+         // We also guarantee that the estimate makes sense by comparing it to the
+         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
+         // and the LowerBound (calculated by "max(\"NUM_DISTINCTS\")").
+         + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal)),"
+         + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+         + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+         + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
+         + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? ";
+     String queryText = null;
+     long start = 0;
+     long end = 0;
+     Query query = null;
+     boolean doTrace = LOG.isDebugEnabled();
+     Object qResult = null;
+     ForwardQueryResult<?> fqr = null;
+     // Check if stats for all the columns of all the partitions exist;
+     // if so, extrapolation is not needed.
+     if (areAllPartsFound) {
+       queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+           + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+           + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+       start = doTrace ? System.nanoTime() : 0;
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames),
+           queryText);
+       if (qResult == null) {
+         query.closeAll();
+         return Collections.emptyList();
+       }
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       List<Object[]> list = ensureList(qResult);
+       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(list.size());
+       for (Object[] row : list) {
+         colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+         Deadline.checkTimeout();
+       }
+       query.closeAll();
+       return colStats;
+     } else {
+       // Extrapolation is needed for some columns.
+       // In this case, stats for at least one column of some partition are missing.
+       // We need to extrapolate the missing values based on the other partitions.
+       List<ColumnStatisticsObj> colStats = new ArrayList<ColumnStatisticsObj>(colNames.size());
+       queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") "
+           + " from " + PART_COL_STATS
+           + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+           + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")"
+           + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+           + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+       start = doTrace ? System.nanoTime() : 0;
+       query = pm.newQuery("javax.jdo.query.SQL", queryText);
+       qResult = executeWithArray(query, prepareParams(catName, dbName, tableName, partNames, colNames),
+           queryText);
+       end = doTrace ? System.nanoTime() : 0;
+       timingTrace(doTrace, queryText, start, end);
+       if (qResult == null) {
+         query.closeAll();
+         return Collections.emptyList();
+       }
+       List<String> noExtraColumnNames = new ArrayList<String>();
+       Map<String, String[]> extraColumnNameTypeParts = new HashMap<String, String[]>();
+       List<Object[]> list = ensureList(qResult);
+       for (Object[] row : list) {
+         String colName = (String) row[0];
+         String colType = (String) row[1];
+         // Extrapolation is not needed for this column if
+         // count(\"PARTITION_NAME\")==partNames.size()
+         // Or, extrapolation is not possible for this column if
+         // count(\"PARTITION_NAME\")<2
+         Long count = extractSqlLong(row[2]);
+         if (count == partNames.size() || count < 2) {
+           noExtraColumnNames.add(colName);
+         } else {
+           extraColumnNameTypeParts.put(colName, new String[] { colType, String.valueOf(count) });
+         }
+         Deadline.checkTimeout();
+       }
+       query.closeAll();
+       // Extrapolation is not needed for the columns in noExtraColumnNames
+       if (noExtraColumnNames.size() != 0) {
+         queryText = commonPrefix + " and \"COLUMN_NAME\" in ("
+             + makeParams(noExtraColumnNames.size()) + ")" + " and \"PARTITION_NAME\" in ("
+             + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
+         start = doTrace ? System.nanoTime() : 0;
+         query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         qResult = executeWithArray(query,
+             prepareParams(catName, dbName, tableName, partNames, noExtraColumnNames), queryText);
+         if (qResult == null) {
+           query.closeAll();
+           return Collections.emptyList();
+         }
+         list = ensureList(qResult);
+         for (Object[] row : list) {
+           colStats.add(prepareCSObjWithAdjustedNDV(row, 0, useDensityFunctionForNDVEstimation, ndvTuner));
+           Deadline.checkTimeout();
+         }
+         end = doTrace ? System.nanoTime() : 0;
+         timingTrace(doTrace, queryText, start, end);
+         query.closeAll();
+       }
+       // Extrapolation is needed for the columns in extraColumnNameTypeParts.
+       // Give each partition a sequence number.
+       if (extraColumnNameTypeParts.size() != 0) {
+         Map<String, Integer> indexMap = new HashMap<String, Integer>();
+         for (int index = 0; index < partNames.size(); index++) {
+           indexMap.put(partNames.get(index), index);
+         }
+         // get sum for all columns to reduce the number of queries
+         Map<String, Map<Integer, Object>> sumMap = new HashMap<String, Map<Integer, Object>>();
+         queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
+             + " from " + PART_COL_STATS + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ? "
+             + " and \"COLUMN_NAME\" in (" + makeParams(extraColumnNameTypeParts.size())
+             + ") and \"PARTITION_NAME\" in (" + makeParams(partNames.size())
+             + ") group by \"COLUMN_NAME\"";
+         start = doTrace ? System.nanoTime() : 0;
+         query = pm.newQuery("javax.jdo.query.SQL", queryText);
+         List<String> extraColumnNames = new ArrayList<String>();
+         extraColumnNames.addAll(extraColumnNameTypeParts.keySet());
+         qResult = executeWithArray(query,
+             prepareParams(catName, dbName, tableName, partNames, extraColumnNames), queryText);
+         if (qResult == null) {
+           query.closeAll();
+           return Collections.emptyList();
+         }
+         list = ensureList(qResult);
+         // see the indexes for colstats in IExtrapolatePartStatus
+         Integer[] sumIndex = new Integer[] { 6, 10, 11, 15 };
+         for (Object[] row : list) {
+           Map<Integer, Object> indexToObject = new HashMap<Integer, Object>();
+           for (int ind = 1; ind < row.length; ind++) {
+             indexToObject.put(sumIndex[ind - 1], row[ind]);
+           }
+           // row[0] is the column name
+           sumMap.put((String) row[0], indexToObject);
+           Deadline.checkTimeout();
+         }
+         end = doTrace ? System.nanoTime() : 0;
+         timingTrace(doTrace, queryText, start, end);
+         query.closeAll();
+         for (Map.Entry<String, String[]> entry : extraColumnNameTypeParts.entrySet()) {
+           Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2];
+           String colName = entry.getKey();
+           String colType = entry.getValue()[0];
+           Long sumVal = Long.parseLong(entry.getValue()[1]);
+           // fill in colname
+           row[0] = colName;
+           // fill in coltype
+           row[1] = colType;
+           // Use linear extrapolation; more complicated methods can be added in
+           // the future.
+           IExtrapolatePartStatus extrapolateMethod = new LinearExtrapolatePartStatus();
+           // fill in colstatus
+           Integer[] index = null;
+           boolean decimal = false;
+           if (colType.toLowerCase().startsWith("decimal")) {
+             index = IExtrapolatePartStatus.indexMaps.get("decimal");
+             decimal = true;
+           } else {
+             index = IExtrapolatePartStatus.indexMaps.get(colType.toLowerCase());
+           }
+           // If the colType is not a known type (long, double, etc.), then get
+           // all indices.
+           if (index == null) {
+             index = IExtrapolatePartStatus.indexMaps.get("default");
+           }
+           for (int colStatIndex : index) {
+             String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex];
+             // if the aggregation type is sum, we do a scale-up
+             if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) {
+               Object o = sumMap.get(colName).get(colStatIndex);
+               if (o == null) {
+                 row[2 + colStatIndex] = null;
+               } else {
+                 Long val = extractSqlLong(o);
+                 row[2 + colStatIndex] = val / sumVal * (partNames.size());
+               }
+             } else if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Min
+                 || IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Max) {
+               // if the aggregation type is min/max, we extrapolate from the
+               // left/right borders
+               if (!decimal) {
+                 queryText = "select \"" + colStatName
+                     + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
+                     + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
+                     + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                     + " order by \"" + colStatName + "\"";
+               } else {
+                 queryText = "select \"" + colStatName
+                     + "\",\"PARTITION_NAME\" from " + PART_COL_STATS
+                     + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + " and \"COLUMN_NAME\" = ?"
+                     + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")"
+                     + " order by cast(\"" + colStatName + "\" as decimal)";
+               }
+               start = doTrace ? System.nanoTime() : 0;
+               query = pm.newQuery("javax.jdo.query.SQL", queryText);
+               qResult = executeWithArray(query,
+                   prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText);
+               if (qResult == null) {
+                 query.closeAll();
+                 return Collections.emptyList();
+               }
+               fqr = (ForwardQueryResult<?>) qResult;
+               Object[] min = (Object[]) (fqr.get(0));
+               Object[] max = (Object[]) (fqr.get(fqr.size() - 1));
+               end = doTrace ? System.nanoTime() : 0;
+               timingTrace(doTrace, queryText, start, end);
+               query.closeAll();
+               if (min[0] == null || max[0] == null) {
+                 row[2 + colStatIndex] = null;
+               } else {
+                 row[2 + colStatIndex] = extrapolateMethod.extrapolate(min, max, colStatIndex,
+                     indexMap);
+               }
+             } else {
+               // if the aggregation type is avg, we use the average over the existing partitions.
+               queryText = "select "
+                   + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as decimal)),"
+                   + "avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+                   + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\")"
+                   + " from " + PART_COL_STATS + "" + " where \"CAT_NAME\" = ? and \"DB_NAME\" = ? and \"TABLE_NAME\" = ?"
+                   + " and \"COLUMN_NAME\" = ?" + " and \"PARTITION_NAME\" in ("
+                   + makeParams(partNames.size()) + ")" + " group by \"COLUMN_NAME\"";
+               start = doTrace ? System.nanoTime() : 0;
+               query = pm.newQuery("javax.jdo.query.SQL", queryText);
+               qResult = executeWithArray(query,
+                   prepareParams(catName, dbName, tableName, partNames, Arrays.asList(colName)), queryText);
+               if (qResult == null) {
+                 query.closeAll();
+                 return Collections.emptyList();
+               }
+               fqr = (Forward

<TRUNCATED>

[24/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 0000000,9661beb..70be8d8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@@ -1,0 -1,3699 +1,3757 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ 
+ import java.io.IOException;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+ import org.apache.hadoop.hive.metastore.api.CompactionResponse;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.DataOperationType;
+ import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
+ import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+ import org.apache.hadoop.hive.metastore.api.FireEventResponse;
+ import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+ import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.LockRequest;
+ import org.apache.hadoop.hive.metastore.api.LockResponse;
+ import org.apache.hadoop.hive.metastore.api.Materialization;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+ import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+ import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+ import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+ import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+ import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * Wrapper around the Hive metastore Thrift API.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public interface IMetaStoreClient {
+ 
+   /**
+    * Returns whether the current client is compatible with the given configuration.
+    * @return true if the client is compatible with the configuration, false otherwise.
+    */
+   boolean isCompatibleWith(Configuration conf);
+ 
+   /**
+    * Set added jars path info to MetaStoreClient.
+    * @param addedJars the hive.added.jars.path. It is qualified paths separated by commas.
+    */
+   void setHiveAddedJars(String addedJars);
+ 
+   /**
+    * Returns true if the current client is using an in-process (local) metastore.
+    *
+    * @return true if the metastore is in-process, false otherwise.
+    */
+   boolean isLocalMetaStore();
+ 
+   /**
+    *  Tries to reconnect this MetaStoreClient to the MetaStore.
+    */
+   void reconnect() throws MetaException;
+ 
+   /**
+    * Close the connection to the metastore.
+    */
+   @NoReconnect
+   void close();
+ 
+   /**
+    * Set a metastore configuration variable that is open to end users.
+    */
+   void setMetaConf(String key, String value) throws MetaException, TException;
+ 
+   /**
+    * Get the current value of a metastore configuration variable.
+    */
+   String getMetaConf(String key) throws MetaException, TException;
+ 
+   /**
+    * Create a new catalog.
+    * @param catalog catalog object to create.
+    * @throws AlreadyExistsException A catalog of this name already exists.
+    * @throws InvalidObjectException There is something wrong with the passed in catalog object.
+    * @throws MetaException something went wrong, usually either in the database or trying to
+    * create the directory for the catalog.
+    * @throws TException general thrift exception.
+    */
+   void createCatalog(Catalog catalog)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Alter an existing catalog.
+    * @param catalogName the name of the catalog to alter.
+    * @param newCatalog the new catalog object.  All relevant details of the catalog should be
+    *                   set; don't rely on the system to figure out what you changed and only copy
+    *                   that in.
+    * @throws NoSuchObjectException no catalog of this name exists
+    * @throws InvalidObjectException an attempt was made to make an unsupported change (such as
+    * catalog name).
+    * @throws MetaException usually indicates a database error
+    * @throws TException general thrift exception
+    */
+   void alterCatalog(String catalogName, Catalog newCatalog)
+       throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Get a catalog object.
+    * @param catName Name of the catalog to fetch.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of all catalogs known to the system.
+    * @return list of catalog names
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   List<String> getCatalogs() throws MetaException, TException;
+ 
+   /**
+    * Drop a catalog.  Catalogs must be empty to be dropped; there is no cascade for dropping a
+    * catalog.
+    * @param catName name of the catalog to drop
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws InvalidOperationException The catalog is not empty and cannot be dropped.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   void dropCatalog(String catName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
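A hedged usage sketch of the catalog methods declared above; obtaining a connected IMetaStoreClient is out of scope here, the catalog name and location are placeholders, and the Catalog setters are assumed from its Thrift field names (name, locationUri).

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.thrift.TException;

    public class CatalogLifecycleSketch {
      static void demo(IMetaStoreClient client) throws TException {
        Catalog cat = new Catalog();
        cat.setName("test_cat");                   // hypothetical catalog name
        cat.setLocationUri("/warehouse/test_cat"); // hypothetical warehouse location

        client.createCatalog(cat);                 // AlreadyExistsException if it already exists
        Catalog fetched = client.getCatalog("test_cat");
        System.out.println(fetched.getName());

        // Catalogs must be empty before they can be dropped; there is no cascade.
        client.dropCatalog("test_cat");
      }
    }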
+ 
+   /**
+    * Get the names of all databases in the default catalog that match the given pattern.
+    * @param databasePattern pattern for the database name to match
+    * @return List of database names.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String databasePattern) throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog whose names match a pattern.
+    * @param catName  catalog name.  Can be null, in which case the default catalog is assumed.
+    * @param databasePattern pattern for the database name to match
+    * @return list of database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String catName, String databasePattern)
+       throws MetaException, TException;
+ 
+   /**
+    * Get the names of all databases in the MetaStore.
+    * @return List of database names in the default catalog.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases() throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog.
+    * @param catName catalog name.  Can be null, in which case the default catalog is assumed.
+    * @return list of all database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases(String catName) throws MetaException, TException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException general thrift error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
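A hedged sketch of the table-listing calls above; the catalog, database, and pattern values are placeholders, and the client is assumed to already be connected.

    import java.util.List;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.TableType;
    import org.apache.thrift.TException;

    public class ListTablesSketch {
      static void demo(IMetaStoreClient client) throws TException {
        // All tables in the default database of the "hive" catalog matching the pattern.
        List<String> all = client.getTables("hive", "default", "*");

        // Only view-type entries, using the table-type overload on the default catalog.
        List<String> views = client.getTables("default", "*", TableType.VIRTUAL_VIEW);

        System.out.println(all.size() + " tables, " + views.size() + " views");
      }
    }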
+ 
+   /**
+    * Get materialized views that have rewriting enabled.  This will use the default catalog.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get materialized views that have rewriting enabled.
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
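+ 
+   // Illustrative usage sketch, assuming "client" is a connected IMetaStoreClient instance;
+   // the "sales*" database pattern is a placeholder. Fetches lightweight metadata for all
+   // managed tables in the matching databases.
+   //
+   //   List<TableMeta> metas = client.getTableMeta("sales*", "*",
+   //       Collections.singletonList(TableType.MANAGED_TABLE.toString()));
+   //   for (TableMeta meta : metas) {
+   //     System.out.println(meta.getDbName() + "." + meta.getTableName());
+   //   }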
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param catName catalog to search in.  Search cannot cross catalogs.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the specified database does not exist.
+    */
+   List<String> getAllTables(String dbName) throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the specified database does not exist.
+    */
+   List<String> getAllTables(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
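+ 
+   // Illustrative usage sketch following the filter syntax described above; "client" and the
+   // owner value are placeholders, and "Constants" refers to the metastore constants class
+   // mentioned in the Javadoc.
+   //
+   //   String filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\"";
+   //   List<String> names = client.listTableNamesByFilter("default", filter, (short) 100);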
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter, int maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    *
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param ifPurge
+    *          completely purge the table (skipping trash) while removing data from the warehouse
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Drop a table.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @param ifPurge whether dropped data should be immediately removed rather than placed in HDFS
+    *               trash.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                  boolean ignoreUnknownTable, boolean ifPurge)
+     throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with ifPurge set to
+    * false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                          boolean ignoreUnknownTable)
+     throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, false);
+   }
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with deleteData
+    * set and ignoreUnknownTable set to true and ifPurge set to false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, true, true, false);
+   }
+ 
+   /**
+    * Truncate the table/partitions in the DEFAULT database.
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
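+ 
+   // Illustrative usage sketch with placeholder table and partition names:
+   //
+   //   client.truncateTable("default", "web_logs", null);                           // whole table
+   //   client.truncateTable("default", "web_logs", Arrays.asList("ds=2018-07-01")); // one partition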
+ 
++  /**
++   * Truncate the table/partitions in the DEFAULT database, passing the transactional context
++   * (transaction id, valid write id list and the write id allocated for this operation).
++   */
++  void truncateTable(String dbName, String tableName, List<String> partNames,
++      long txnId, String validWriteIds, long writeId) throws TException;
++
+   /**
+    * Truncate the table/partitions in the DEFAULT database.
+    * @param catName catalog name
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws MetaException, TException;
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory, either by copying or moving them.
+    *
+    * @param request Contains the path of the data files to be recycled to cmroot and an isPurge
+    *                flag; when the flag is set to true, the recycled files are not moved to Trash
+    * @return Response which is currently void
+    */
+   CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException;
+ 
+   /**
+    * Check whether a table exists in the default catalog.
+    * @param databaseName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String databaseName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Check whether a table exists.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String catName, String dbName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a Database Object in the default catalog
+    * @param databaseName  name of the database to fetch
+    * @return the database
+    * @throws NoSuchObjectException The database does not exist
+    * @throws MetaException Could not fetch the database
+    * @throws TException A thrift communication error occurred
+    */
+   Database getDatabase(String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog name.  Can be null, in which case
+    * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param databaseName database name
+    * @return the database object
+    * @throws NoSuchObjectException No database with this name exists in the specified catalog
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   Database getDatabase(String catalogName, String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a table object in the default catalog.
+    *
+    * @param dbName
+    *          The database the table is located in.
+    * @param tableName
+    *          Name of the table to fetch.
+    * @return An object representing the table.
+    * @throws MetaException
+    *           Could not fetch the table
+    * @throws TException
+    *           A thrift communication error occurred
+    * @throws NoSuchObjectException
+    *           In case the table wasn't found.
+    */
+   Table getTable(String dbName, String tableName) throws MetaException,
+       TException, NoSuchObjectException;
+ 
++  /**
++   * Get a table object in the default catalog, passing the transactional context
++   * (transaction id and valid write id list) for the read.
++   */
++  Table getTable(String dbName, String tableName,
++                 long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException;
++
+   /**
+    * Get a table object.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object.
+    * @throws MetaException Something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
+ 
++  /**
++   * Get a table object, passing the transactional context (transaction id and valid write id
++   * list) for the read.
++   */
++  Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String validWriteIdList) throws TException;
++
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param catName catalog name
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Returns the invalidation information for the materialized views given as input.
+    */
+   Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
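+ 
+   // Illustrative usage sketch using the partkey=partval form described above, with
+   // placeholder database, table and partition names:
+   //
+   //   Partition p = client.appendPartition("default", "web_logs", "ds=2018-07-01");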
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to the table.
+    *
+    * @param partition
+    *          The partition to add
+    * @return The partition added
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    */
+   Partition add_partition(Partition partition)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions
+    *          The partitions to add
+    * @return the number of partitions that were added
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    */
+   int add_partitions(List<Partition> partitions)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions using a spec proxy.
+    * @param partitionSpec partition spec proxy
+    * @return number of partitions that were added
+    * @throws InvalidObjectException the partitionSpec is malformed.
+    * @throws AlreadyExistsException one or more of the partitions already exist.
+    * @throws MetaException error accessing the RDBMS or storage.
+    * @throws TException thrift transport error
+    */
+   int add_partitions_pspec(PartitionSpecProxy partitionSpec)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions The partitions to add
+    * @param ifNotExists only add partitions if they don't exist
+    * @param needResults Whether the results are needed
+    * @return the partitions that were added, or null if !needResults
+    */
+   List<Partition> add_partitions(
+       List<Partition> partitions, boolean ifNotExists, boolean needResults)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
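+ 
+   // Illustrative usage sketch; "newParts" stands for a list of Partition objects already
+   // built by the caller.
+   //
+   //   List<Partition> added = client.add_partitions(newParts, /* ifNotExists */ true,
+   //       /* needResults */ true);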
+ 
+   /**
+    * Get a partition.
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
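+ 
+   // Illustrative usage sketch: for a placeholder table partitioned by (year, month), the
+   // supplied values must follow the partition key order.
+   //
+   //   Partition p = client.getPartition("default", "web_logs", Arrays.asList("2018", "07"));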
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
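+ 
+   // Illustrative usage sketch with placeholder databases and tables:
+   //
+   //   Map<String, String> spec = new HashMap<>();
+   //   spec.put("ds", "2018-07-01");
+   //   Partition moved = client.exchange_partition(spec, "staging", "web_logs_staging",
+   //       "default", "web_logs");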
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table, for now must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                String sourceDb, String sourceTable, String destCat, String destdb,
+                                String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * A single partitionSpecs may cause multiple partitions to be exchanged.
+    * E.g., for a table partitioned by year/month/day, exchanging partition year=2015 results in
+    * all the partitions belonging to it being exchanged. This function returns the list of
+    * affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * A single partitionSpecs may cause multiple partitions to be exchanged.
+    * E.g., for a table partitioned by year/month/day, exchanging partition year=2015 results in
+    * all the partitions belonging to it being exchanged. This function returns the list of
+    * affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table, for now must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destdb, String destTableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, TException;
+ 
+   /**
+    * Get a Partition by name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
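+ 
+   // Illustrative usage sketch using the encoded name format from the Javadoc above, with
+   // placeholder database and table names:
+   //
+   //   Partition p = client.getPartition("default", "web_logs",
+   //       "ds=2010-02-03/ts=2010-02-03 18%3A16%3A01");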
+ 
+   /**
+    * Get a Partition by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the table's partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String dbName, String tableName,
+       List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the table's partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                      List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
+     throws TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                         int maxParts) throws TException;
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
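+ 
+   // Illustrative usage sketch: for a placeholder table partitioned by (year, month, day),
+   // match every month and day in 2018.
+   //
+   //   List<Partition> parts = client.listPartitions("default", "web_logs",
+   //       Arrays.asList("2018", ".*", ".*"), (short) -1);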
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                  List<String> part_vals, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   int max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   List<String> part_vals, int max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition values
+    * @param request request
+    * @return response
+    * @throws MetaException error accessing RDBMS
+    * @throws TException thrift transport error
+    * @throws NoSuchObjectException no such table
+    */
+   PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param catName catalog name
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
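+ 
+   // Illustrative usage sketch: filtering on a string partition key of a placeholder table.
+   //
+   //   List<Partition> parts = client.listPartitionsByFilter("default", "web_logs",
+   //       "ds >= \"2018-07-01\" and ds <= \"2018-07-31\"", (short) -1);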
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param catName catalog name.
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= "\p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                          String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return whether the resulting list contains partitions which may or may not match the expr
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String db_name, String tbl_name,
+       byte[] expr, String default_partition_name, short max_parts, List<Partition> result)
+           throws TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param catName catalog name
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return whether the resulting list contains partitions which may or may not match the expr
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+                                String default_partition_name, int max_parts, List<Partition> result)
+       throws TException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              int maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions by a list of partition names.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get partitions by a list of partition names.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                        List<String> part_names)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames group to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, List<String> partialPvals, short maxParts, String userName,
+       List<String> groupNames) throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames group to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              List<String> partialPvals, int maxParts, String userName,
+                                              List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
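+ 
+   // Illustrative usage sketch with placeholder names; LOAD_DONE is an event type defined by
+   // PartitionEventType.
+   //
+   //   Map<String, String> partKVs = new HashMap<>();
+   //   partKVs.put("ds", "2018-07-01");
+   //   client.markPartitionForEvent("default", "web_logs", partKVs, PartitionEventType.LOAD_DONE);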
+ 
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   void markPartitionForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                              PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException never throws this AFAICT
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException partition partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                                     PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
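
A minimal usage sketch of the two event calls above, assuming an already-connected IMetaStoreClient named client; the database ("default"), table ("web_logs"), and the partition key/value are hypothetical, chosen only for illustration.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PartitionEventType;
    import org.apache.thrift.TException;

    class PartitionEventSketch {
      static boolean markAndCheck(IMetaStoreClient client) throws TException {
        Map<String, String> partKVs = new HashMap<>();
        partKVs.put("ds", "2018-07-25");                   // hypothetical partition key and value
        client.markPartitionForEvent("default", "web_logs", partKVs, PartitionEventType.LOAD_DONE);
        // true once the LOAD_DONE event has been recorded for this partition
        return client.isPartitionMarkedForEvent("default", "web_logs", partKVs,
            PartitionEventType.LOAD_DONE);
      }
    }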
+ 
+   /**
+    * Validate that the given partition values contain only characters permitted by the
+    * partition name whitelist pattern configured on the metastore.
+    * @param partVals partition values to validate
+    * @throws MetaException a value contains characters that are not allowed
+    * @throws TException thrift transport error
+    */
+   void validatePartitionNameCharacters(List<String> partVals) throws TException, MetaException;
+ 
+   /**
+    * Create a new table.
+    * @param tbl table object to create
+    * @throws AlreadyExistsException a table of this name already exists
+    * @throws InvalidObjectException the table object is malformed
+    * @throws MetaException error accessing the RDBMS or the storage
+    * @throws NoSuchObjectException the database the table should be created in does not exist
+    * @throws TException thrift transport error
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException;
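
A minimal sketch of creating a table through this call. The column, table name, and storage classes below are ordinary text-table choices picked for illustration, not required by the API; leaving the location unset assumes the server fills in a default under the database directory.

    import java.util.Collections;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.metastore.api.SerDeInfo;
    import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    class CreateTableSketch {
      static void createSimpleTable(IMetaStoreClient client) throws TException {
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(Collections.singletonList(new FieldSchema("id", "int", "row id")));
        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
        sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
        sd.setSerdeInfo(new SerDeInfo("lazy-simple",
            "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
            Collections.emptyMap()));

        Table tbl = new Table();
        tbl.setDbName("default");
        tbl.setTableName("create_table_sketch");           // hypothetical table name
        tbl.setSd(sd);
        client.createTable(tbl);
      }
    }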
+ 
+   /**
+    * Alter a table
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object; it should be a complete representation of the table, not
+    *             just the things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String databaseName, String tblName, Table table)
+       throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Alter a table. Equivalent to
+    * {@link #alter_table(String, String, String, Table, EnvironmentContext)} with
+    * EnvironmentContext set to null.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object; it should be a complete representation of the table, not
+    *                 just the things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   default void alter_table(String catName, String dbName, String tblName, Table newTable)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table(catName, dbName, tblName, newTable, null);
+   }
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object; it should be a complete representation of the table, not
+    *                 just the things you want to change.
+    * @param envContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String catName, String dbName, String tblName, Table newTable,
+                   EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException, TException;
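
The usual pattern with these alter calls is read-modify-write: fetch the current Table, change what you need, and send the whole object back. A minimal sketch, assuming the interface's getTable(dbName, tableName) accessor declared elsewhere and a connected client; the property change is hypothetical.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    class AlterTableSketch {
      static void tagTable(IMetaStoreClient client, String dbName, String tableName)
          throws TException {
        Table current = client.getTable(dbName, tableName); // full, current representation
        current.putToParameters("comment", "reviewed");      // hypothetical property change
        client.alter_table(dbName, tableName, current);      // send the complete object back
      }
    }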
+ 
+   /**
+    * @deprecated Use {@link #alter_table_with_environmentContext(String, String, Table,
+    * EnvironmentContext)} instead, passing the cascade option in the EnvironmentContext
+    * via {@code StatsSetupConst.CASCADE}.
+    */
+   @Deprecated
+   void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException;
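
As the deprecation note says, the cascade flag is now expressed through an EnvironmentContext. A minimal sketch, assuming a connected client and a fully populated replacement Table; note that this merge also tags alter_table_with_environmentContext itself as @Deprecated in favor of the catalog-aware overload.

    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.thrift.TException;

    class CascadingAlterSketch {
      static void alterWithCascade(IMetaStoreClient client, Table newTable) throws TException {
        EnvironmentContext ctx = new EnvironmentContext();
        // Ask the metastore to cascade the column changes to existing partitions.
        ctx.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
        client.alter_table_with_environmentContext(newTable.getDbName(), newTable.getTableName(),
            newTable, ctx);
      }
    }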
+ 
+   /**
+    * Alter a table.
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object; it should be a complete representation of the table, not
+    *              just the things you want to change.
+    * @param environmentContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
++  @Deprecated
+   void alter_table_with_environmentContext(String databaseName, String tblName, Table table,
+       EnvironmentContext environmentContext) throws InvalidOperationException, MetaException,
+       TException;
+ 
++  void alter_table(String catName, String databaseName, String tblName, Table table,
++      EnvironmentContext environmentContext, long txnId, String validWriteIdList)
++          throws InvalidOperationException, MetaException, TException;
+   /**
+    * Create a new database.
+    * @param db database object.  If the catalog name is null it will be assumed to be
+    *           {@link Warehouse#DEFAULT_CATALOG_NAME}.
+    * @throws InvalidObjectException There is something wrong with the database object.
+    * @throws AlreadyExistsException There is already a database of this name in the specified
+    * catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
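
A minimal sketch of creating a database. The four-argument Database constructor mirrors the one used elsewhere in this patch; the name and description are made up, and passing null for location and parameters is an assumption of this sketch that leaves them to server defaults.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.thrift.TException;

    class CreateDatabaseSketch {
      static void createDb(IMetaStoreClient client) throws TException {
        // Arguments: name, description, locationUri, parameters.
        Database db = new Database("sketch_db", "database created for illustration", null, null);
        client.createDatabase(db);
      }
    }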
+ 
+   /**
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @throws NoSuchObjectException No such database exists.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    *
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    *
+    * Drop a database.
+    * @param name database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String catName, String dbName, boolean deleteData, boolean ignoreUnknownDb,
+                     boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName, boolean deleteData,
+                             boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with deleteData =
+    * true, ignoreUnknownDb = false, cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, true, false, false);
+   }
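
Putting the drop variants above together, a minimal sketch; the flags control data deletion, tolerance of a missing database, and whether contained objects are dropped.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.thrift.TException;

    class DropDatabaseSketch {
      static void dropQuietly(IMetaStoreClient client, String dbName) throws TException {
        // deleteData = true: remove the warehouse directory as well.
        // ignoreUnknownDb = true: do not fail if the database is already gone.
        // cascade = true: drop the tables it still contains instead of failing.
        client.dropDatabase(dbName, true, true, true);
      }
    }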
+ 
+ 
+   /**
+    * Alter a database.
+    * @param name database name.
+    * @param db new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String name, Database db)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Alter a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param newDb new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String catName, String dbName, Database newDb)
+       throws NoSuchObjectException, MetaException, TException;
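
alterDatabase follows the same read-modify-write pattern as the table calls. A minimal sketch, assuming the interface's getDatabase(name) accessor declared elsewhere; the new description is hypothetical.

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Database;
    import org.apache.thrift.TException;

    class AlterDatabaseSketch {
      static void updateDescription(IMetaStoreClient client, String dbName) throws TException {
        Database db = client.getDatabase(dbName);          // fetch the full current object
        db.setDescription("refreshed description");        // hypothetical change
        client.alterDatabase(dbName, db);
      }
    }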
+ 
+   /**
+    * Drop a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException;
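
A minimal sketch of dropping a single partition by value; part_vals must line up with the table's partition keys, and deleteData = false removes only the metadata entry while keeping the files. The database, table, and value are hypothetical.

    import java.util.Collections;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.thrift.TException;

    class DropPartitionSketch {
      static boolean dropMetadataOnly(IMetaStoreClient client) throws TException {
        // Single partition key "ds"; value order must match the partition key order.
        return client.dropPartition("default", "web_logs",
            Collections.singletonList("2018-07-25"), false /* deleteData */);
      }
    }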
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String catName, String db_name, String tbl_name,
+               

<TRUNCATED>

[50/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0725

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0725


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/758ff449
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/758ff449
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/758ff449

Branch: refs/heads/master
Commit: 758ff449099065a84c46d63f9418201c8a6731b1
Parents: 5e7a8b5 71c4987
Author: sergey <se...@apache.org>
Authored: Wed Jul 25 11:18:51 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Wed Jul 25 11:18:51 2018 -0700

----------------------------------------------------------------------
 .../VectorizedComparisonBench.java              |   19 +-
 .../tezplugins/LlapTaskSchedulerService.java    |   12 +-
 .../ExpressionTemplates/ColumnCompareColumn.txt |    4 +-
 .../ExpressionTemplates/ColumnCompareScalar.txt |    4 +-
 .../Decimal64ColumnCompareDecimal64Column.txt   |   54 +
 .../Decimal64ColumnCompareDecimal64Scalar.txt   |   66 +
 .../Decimal64ScalarCompareDecimal64Column.txt   |   66 +
 .../DecimalColumnCompareDecimalColumn.txt       |  153 +
 .../DecimalColumnCompareDecimalScalar.txt       |  177 +
 .../DecimalScalarCompareDecimalColumn.txt       |  180 +
 .../ExpressionTemplates/IfExprColumnScalar.txt  |    6 +-
 .../IfExprObjectColumnColumn.txt                |   41 +
 .../IfExprObjectColumnScalar.txt                |   22 +
 .../IfExprObjectScalarColumn.txt                |   22 +
 .../ExpressionTemplates/IfExprScalarColumn.txt  |    6 +-
 .../ExpressionTemplates/IfExprScalarScalar.txt  |    6 +-
 .../ExpressionTemplates/ScalarCompareColumn.txt |    4 +-
 .../exec/vector/VectorExpressionDescriptor.java |    2 +
 .../ql/exec/vector/VectorizationContext.java    |  222 +-
 .../exec/vector/VectorizationContext.java.orig  | 3771 ++++++++++++++++++
 .../vector/expressions/CastCharToBinary.java    |   55 +
 .../expressions/CastStringToTimestamp.java      |  177 +
 .../IfExprCharScalarStringScalar.java           |    2 +-
 .../IfExprDecimal64ColumnDecimal64Column.java   |   55 +
 .../IfExprDecimal64ColumnDecimal64Scalar.java   |   70 +
 .../IfExprDecimal64ScalarDecimal64Column.java   |   71 +
 .../IfExprDecimal64ScalarDecimal64Scalar.java   |   75 +
 .../expressions/IfExprLongColumnLongColumn.java |    6 +-
 ...fExprStringGroupColumnStringGroupColumn.java |    4 +-
 .../IfExprStringGroupColumnStringScalar.java    |    4 +-
 .../IfExprStringScalarStringGroupColumn.java    |    4 +-
 .../IfExprStringScalarStringScalar.java         |    4 +-
 .../IfExprVarCharScalarStringScalar.java        |    2 +-
 .../expressions/LongColEqualLongColumn.java     |  146 -
 .../expressions/LongColEqualLongScalar.java     |  157 -
 .../LongColGreaterEqualLongColumn.java          |  146 -
 .../LongColGreaterEqualLongScalar.java          |  158 -
 .../expressions/LongColGreaterLongColumn.java   |  146 -
 .../expressions/LongColGreaterLongScalar.java   |  157 -
 .../expressions/LongColLessEqualLongColumn.java |  146 -
 .../expressions/LongColLessEqualLongScalar.java |  158 -
 .../expressions/LongColLessLongColumn.java      |  146 -
 .../expressions/LongColLessLongScalar.java      |  158 -
 .../expressions/LongColNotEqualLongColumn.java  |  146 -
 .../expressions/LongColNotEqualLongScalar.java  |  158 -
 .../expressions/LongScalarEqualLongColumn.java  |  157 -
 .../LongScalarGreaterEqualLongColumn.java       |  160 -
 .../LongScalarGreaterLongColumn.java            |  161 -
 .../LongScalarLessEqualLongColumn.java          |  160 -
 .../expressions/LongScalarLessLongColumn.java   |  161 -
 .../LongScalarNotEqualLongColumn.java           |  161 -
 .../hive/ql/udf/generic/GenericUDFIf.java       |   16 +
 .../hive/ql/udf/generic/GenericUDFOPEqual.java  |    8 +-
 .../generic/GenericUDFOPEqualOrGreaterThan.java |    8 +-
 .../generic/GenericUDFOPEqualOrLessThan.java    |    8 +-
 .../ql/udf/generic/GenericUDFOPGreaterThan.java |    8 +-
 .../ql/udf/generic/GenericUDFOPLessThan.java    |    8 +-
 .../ql/udf/generic/GenericUDFOPNotEqual.java    |    8 +-
 .../ql/udf/generic/GenericUDFTimestamp.java     |    3 +-
 .../exec/vector/TestVectorizationContext.java   |    4 +-
 .../expressions/TestVectorArithmetic.java       |   11 +
 .../expressions/TestVectorCastStatement.java    |   42 +-
 .../expressions/TestVectorDateAddSub.java       |   10 +
 .../vector/expressions/TestVectorDateDiff.java  |   11 +
 .../expressions/TestVectorFilterCompare.java    |  795 ++++
 .../expressions/TestVectorIfStatement.java      |   74 +-
 .../vector/expressions/TestVectorNegative.java  |    9 +
 .../expressions/TestVectorStringConcat.java     |   11 +
 .../expressions/TestVectorStringUnary.java      |   12 +-
 .../vector/expressions/TestVectorSubStr.java    |    9 +
 .../expressions/TestVectorTimestampExtract.java |    9 +
 .../clientnegative/alter_table_wrong_db.q       |    4 +
 .../clientnegative/alter_table_wrong_table.q    |    3 +
 .../clientnegative/alter_table_wrong_db.q.out   |   25 +
 .../alter_table_wrong_table.q.out               |   13 +
 .../convert_decimal64_to_decimal.q.out          |    6 +-
 .../llap/convert_decimal64_to_decimal.q.out     |   10 +-
 .../llap/vector_case_when_1.q.out               |   12 +-
 .../llap/vector_decimal_mapjoin.q.out           |   24 +-
 .../llap/vector_outer_reference_windowed.q.out  |   48 +-
 .../llap/vector_udf_adaptor_1.q.out             |   32 +-
 .../clientpositive/llap/vectorized_case.q.out   |   16 +-
 .../clientpositive/llap/vectorized_casts.q.out  |    4 +-
 ...vectorized_dynamic_semijoin_reduction2.q.out |    4 +-
 .../llap/vectorized_mapjoin3.q.out              |    6 +-
 .../llap/vectorized_timestamp_funcs.q.out       |   12 +-
 .../llap/vectorized_timestamp_ints_casts.q.out  |    8 +-
 .../spark/vector_decimal_mapjoin.q.out          |   24 +-
 .../clientpositive/spark/vectorized_case.q.out  |   16 +-
 .../spark/vectorized_timestamp_funcs.q.out      |   12 +-
 .../clientpositive/vector_case_when_1.q.out     |   12 +-
 .../clientpositive/vector_decimal_mapjoin.q.out |   16 +-
 .../clientpositive/vectorized_case.q.out        |   16 +-
 .../clientpositive/vectorized_casts.q.out       |    4 +-
 .../vectorized_timestamp_funcs.q.out            |   12 +-
 .../vectorized_timestamp_ints_casts.q.out       |    8 +-
 .../hadoop/hive/metastore/HiveAlterHandler.java |    4 +-
 .../ql/exec/vector/DecimalColumnVector.java     |   21 +-
 .../ql/exec/vector/expressions/StringExpr.java  |   17 +
 .../apache/hadoop/hive/tools/GenVectorCode.java |  126 +-
 100 files changed, 6575 insertions(+), 3152 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/758ff449/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------


[27/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0000000,47f819b..285f7fb
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@@ -1,0 -1,9353 +1,9602 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName;
+ 
+ import java.io.IOException;
+ import java.net.InetAddress;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.AbstractMap;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Objects;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.Condition;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOException;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.collect.ImmutableList;
+ import com.google.common.collect.ImmutableListMultimap;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Multimaps;
+ 
+ import org.apache.commons.cli.OptionBuilder;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.StatsUpdateMode;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.InsertEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadhSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+ import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+ import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.security.SecurityUtil;
+ import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.LogUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.ShutdownHookManager;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.core.LoggerContext;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.TProcessor;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.protocol.TProtocolFactory;
+ import org.apache.thrift.server.ServerContext;
+ import org.apache.thrift.server.TServer;
+ import org.apache.thrift.server.TServerEventHandler;
+ import org.apache.thrift.server.TThreadPoolServer;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TServerSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.facebook.fb303.FacebookBase;
+ import com.facebook.fb303.fb_status;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.base.Splitter;
+ import com.google.common.util.concurrent.ThreadFactoryBuilder;
+ 
+ /**
+  * TODO:pc remove application logic to a separate interface.
+  */
+ public class HiveMetaStore extends ThriftHiveMetastore {
+   public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
+   public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
+       "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
+ 
+   // boolean that tells if the HiveMetaStore (remote) server is being used.
+   // Can be used to determine if the calls to metastore api (HMSHandler) are being made with
+   // embedded metastore or a remote one
+   private static boolean isMetaStoreRemote = false;
+ 
+   // Used for testing to simulate method timeout.
+   @VisibleForTesting
+   static boolean TEST_TIMEOUT_ENABLED = false;
+   @VisibleForTesting
+   static long TEST_TIMEOUT_VALUE = -1;
+ 
+   private static ShutdownHookManager shutdownHookMgr;
+ 
+   public static final String ADMIN = "admin";
+   public static final String PUBLIC = "public";
+   /** MM write states. */
+   public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a';
+ 
+   private static HadoopThriftAuthBridge.Server saslServer;
+   private static MetastoreDelegationTokenManager delegationTokenManager;
+   private static boolean useSasl;
+ 
+   static final String NO_FILTER_STRING = "";
+   static final int UNLIMITED_MAX_PARTITIONS = -1;
+ 
+   private static final class ChainedTTransportFactory extends TTransportFactory {
+     private final TTransportFactory parentTransFactory;
+     private final TTransportFactory childTransFactory;
+ 
+     private ChainedTTransportFactory(
+         TTransportFactory parentTransFactory,
+         TTransportFactory childTransFactory) {
+       this.parentTransFactory = parentTransFactory;
+       this.childTransFactory = childTransFactory;
+     }
+ 
+     @Override
+     public TTransport getTransport(TTransport trans) {
+       return childTransFactory.getTransport(parentTransFactory.getTransport(trans));
+     }
+   }
+ 
+   public static boolean isRenameAllowed(Database srcDB, Database destDB) {
+     if (!srcDB.getName().equalsIgnoreCase(destDB.getName())) {
+       if (ReplChangeManager.isSourceOfReplication(srcDB) || ReplChangeManager.isSourceOfReplication(destDB)) {
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   public static class HMSHandler extends FacebookBase implements IHMSHandler {
+     public static final Logger LOG = HiveMetaStore.LOG;
+     private final Configuration conf; // stores datastore (jpox) properties,
+                                      // right now they come from jpox.properties
+ 
+     // Flag to ensure the "always" task threads are initialized only once
+     // instead of multiple times
+     private final static AtomicBoolean alwaysThreadsInitialized =
+         new AtomicBoolean(false);
+ 
+     private static String currentUrl;
+     private FileMetadataManager fileMetadataManager;
+     private PartitionExpressionProxy expressionProxy;
+     private StorageSchemaReader storageSchemaReader;
+ 
+     // Variables for metrics
+     // Package visible so that HMSMetricsListener can see them.
+     static AtomicInteger databaseCount, tableCount, partCount;
+ 
+     private Warehouse wh; // hdfs warehouse
+     private static final ThreadLocal<RawStore> threadLocalMS =
+         new ThreadLocal<RawStore>() {
+           @Override
+           protected RawStore initialValue() {
+             return null;
+           }
+         };
+ 
+     private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() {
+       @Override
+       protected TxnStore initialValue() {
+         return null;
+       }
+     };
+ 
+     private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
+         new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
+       @Override
+       protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
+         return new HashMap<>();
+       }
+     };
+ 
+     public static RawStore getRawStore() {
+       return threadLocalMS.get();
+     }
+ 
+     static void removeRawStore() {
+       threadLocalMS.remove();
+     }
+ 
+     // Thread local configuration is needed as many threads could make changes
+     // to the conf using the connection hook
+     private static final ThreadLocal<Configuration> threadLocalConf =
+         new ThreadLocal<Configuration>() {
+           @Override
+           protected Configuration initialValue() {
+             return null;
+           }
+         };
+ 
+     /**
+      * Thread local HMSHandler used during shutdown to notify meta listeners
+      */
+     private static final ThreadLocal<HMSHandler> threadLocalHMSHandler = new ThreadLocal<>();
+ 
+     /**
+      * Thread local Map to keep track of modified meta conf keys
+      */
+     private static final ThreadLocal<Map<String, String>> threadLocalModifiedConfig =
+         new ThreadLocal<>();
+ 
+     private static ExecutorService threadPool;
+ 
+     static final Logger auditLog = LoggerFactory.getLogger(
+         HiveMetaStore.class.getName() + ".audit");
+ 
+     private static void logAuditEvent(String cmd) {
+       if (cmd == null) {
+         return;
+       }
+ 
+       UserGroupInformation ugi;
+       try {
+         ugi = SecurityUtils.getUGI();
+       } catch (Exception ex) {
+         throw new RuntimeException(ex);
+       }
+ 
+       String address = getIPAddress();
+       if (address == null) {
+         address = "unknown-ip-addr";
+       }
+ 
+       auditLog.info("ugi={}	ip={}	cmd={}	", ugi.getUserName(), address, cmd);
+     }
+ 
+     private static String getIPAddress() {
+       if (useSasl) {
+         if (saslServer != null && saslServer.getRemoteAddress() != null) {
+           return saslServer.getRemoteAddress().getHostAddress();
+         }
+       } else {
+         // if kerberos is not enabled
+         return getThreadLocalIpAddress();
+       }
+       return null;
+     }
+ 
+     private static AtomicInteger nextSerialNum = new AtomicInteger();
+     private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
+       @Override
+       protected Integer initialValue() {
+         return nextSerialNum.getAndIncrement();
+       }
+     };
+ 
+     // This will only be set if the metastore is being accessed from a metastore Thrift server,
+     // not if it is from the CLI. Also, only if the TTransport being used to connect is an
+     // instance of TSocket. This is also not set when kerberos is used.
+     private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
+       @Override
+       protected String initialValue() {
+         return null;
+       }
+     };
+ 
+     /**
+      * Internal function to notify listeners for meta config change events
+      */
+     private void notifyMetaListeners(String key, String oldValue, String newValue) throws MetaException {
+       for (MetaStoreEventListener listener : listeners) {
+         listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, newValue));
+       }
+ 
+       if (transactionalListeners.size() > 0) {
+         // All the fields of this event are final, so no reason to create a new one for each
+         // listener
+         ConfigChangeEvent cce = new ConfigChangeEvent(this, key, oldValue, newValue);
+         for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+           transactionalListener.onConfigChange(cce);
+         }
+       }
+     }
+ 
+     /**
+      * Internal function to notify listeners to revert to the old values of keys
+      * that were modified during setMetaConf. This is called from HiveMetaStore#cleanupRawStore.
+      */
+     private void notifyMetaListenersOnShutDown() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         // Nothing got modified
+         return;
+       }
+       try {
+         Configuration conf = threadLocalConf.get();
+         if (conf == null) {
+           throw new MetaException("Unexpected: modifiedConf is non-null but conf is null");
+         }
+         // Notify listeners of the changed value
+         for (Entry<String, String> entry : modifiedConf.entrySet()) {
+           String key = entry.getKey();
+           // curr value becomes old and vice-versa
+           String currVal = entry.getValue();
+           String oldVal = conf.get(key);
+           if (!Objects.equals(oldVal, currVal)) {
+             notifyMetaListeners(key, oldVal, currVal);
+           }
+         }
+         logInfo("Meta listeners shutdown notification completed.");
+       } catch (MetaException e) {
+         LOG.error("Failed to notify meta listeners on shutdown: ", e);
+       }
+     }
+ 
+     static void setThreadLocalIpAddress(String ipAddress) {
+       threadLocalIpAddress.set(ipAddress);
+     }
+ 
+     // This will return null if the metastore is not being accessed from a metastore Thrift server,
+     // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
+     // is used
+     static String getThreadLocalIpAddress() {
+       return threadLocalIpAddress.get();
+     }
+ 
+     // Make it possible for tests to check that the right type of PartitionExpressionProxy was
+     // instantiated.
+     @VisibleForTesting
+     PartitionExpressionProxy getExpressionProxy() {
+       return expressionProxy;
+     }
+ 
+     /**
+      * Use {@link #getThreadId()} instead.
+      * @return thread id
+      */
+     @Deprecated
+     public static Integer get() {
+       return threadLocalId.get();
+     }
+ 
+     @Override
+     public int getThreadId() {
+       return threadLocalId.get();
+     }
+ 
+     public HMSHandler(String name) throws MetaException {
+       this(name, MetastoreConf.newMetastoreConf(), true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf) throws MetaException {
+       this(name, conf, true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf, boolean init) throws MetaException {
+       super(name);
+       this.conf = conf;
+       isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+       if (threadPool == null) {
+         synchronized (HMSHandler.class) {
+           int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT);
+           threadPool = Executors.newFixedThreadPool(numThreads,
+               new ThreadFactoryBuilder().setDaemon(true)
+                   .setNameFormat("HMSHandler #%d").build());
+         }
+       }
+       if (init) {
+         init();
+       }
+     }
+ 
+     /**
+      * Use {@link #getConf()} instead.
+      * @return Configuration object
+      */
+     @Deprecated
+     public Configuration getHiveConf() {
+       return conf;
+     }
+ 
+     private ClassLoader classLoader;
+     private AlterHandler alterHandler;
+     private List<MetaStorePreEventListener> preListeners;
+     private List<MetaStoreEventListener> listeners;
+     private List<TransactionalMetaStoreEventListener> transactionalListeners;
+     private List<MetaStoreEndFunctionListener> endFunctionListeners;
+     private List<MetaStoreInitListener> initListeners;
+     private Pattern partitionValidationPattern;
+     private final boolean isInTest;
+ 
+     {
+       classLoader = Thread.currentThread().getContextClassLoader();
+       if (classLoader == null) {
+         classLoader = Configuration.class.getClassLoader();
+       }
+     }
+ 
+     @Override
+     public List<TransactionalMetaStoreEventListener> getTransactionalListeners() {
+       return transactionalListeners;
+     }
+ 
+     @Override
+     public List<MetaStoreEventListener> getListeners() {
+       return listeners;
+     }
+ 
+     @Override
+     public void init() throws MetaException {
+       initListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS));
+       for (MetaStoreInitListener singleInitListener: initListeners) {
+           MetaStoreInitContext context = new MetaStoreInitContext();
+           singleInitListener.onInit(context);
+       }
+ 
+       String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER);
+       alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass(
+           alterHandlerName, AlterHandler.class), conf);
+       wh = new Warehouse(conf);
+ 
+       synchronized (HMSHandler.class) {
+         if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) {
+           createDefaultDB();
+           createDefaultRoles();
+           addAdminUsers();
+           currentUrl = MetaStoreInit.getConnectionURL(conf);
+         }
+       }
+ 
+       //Start Metrics
+       if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
+         LOG.info("Begin calculating metadata count metrics.");
+         Metrics.initialize(conf);
+         databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
+         tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
+         partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
+         updateMetrics();
+ 
+       }
+ 
+       preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS));
+       preListeners.add(0, new TransactionalValidationListener(conf));
+       listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf,
+           MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS));
+       listeners.add(new SessionPropertiesListener(conf));
+       listeners.add(new AcidEventListener(conf));
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+       if (Metrics.getRegistry() != null) {
+         listeners.add(new HMSMetricsListener(conf));
+       }
+ 
+       endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // We only initialize once the tasks that need to be run periodically
+       if (alwaysThreadsInitialized.compareAndSet(false, true)) {
+         ThreadPool.initialize(conf);
+         Collection<String> taskNames =
+             MetastoreConf.getStringCollection(conf, ConfVars.TASK_THREADS_ALWAYS);
+         for (String taskName : taskNames) {
+           MetastoreTaskThread task =
+               JavaUtils.newInstance(JavaUtils.getClass(taskName, MetastoreTaskThread.class));
+           task.setConf(conf);
+           long freq = task.runFrequency(TimeUnit.MILLISECONDS);
+           // For backwards compatibility, since some threads used to be hard coded but only run if
+           // frequency was > 0
+           if (freq > 0) {
+             ThreadPool.getPool().scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
+           }
+         }
+       }
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+       fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
+     }
+ 
+     private static String addPrefix(String s) {
+       return threadLocalId.get() + ": " + s;
+     }
+ 
+     /**
+      * Set copy of invoking HMSHandler on thread local
+      */
+     private static void setHMSHandler(HMSHandler handler) {
+       if (threadLocalHMSHandler.get() == null) {
+         threadLocalHMSHandler.set(handler);
+       }
+     }
+     @Override
+     public void setConf(Configuration conf) {
+       threadLocalConf.set(conf);
+       RawStore ms = threadLocalMS.get();
+       if (ms != null) {
+         ms.setConf(conf); // reload if DS related configuration is changed
+       }
+     }
+ 
+     @Override
+     public Configuration getConf() {
+       Configuration conf = threadLocalConf.get();
+       if (conf == null) {
+         conf = new Configuration(this.conf);
+         threadLocalConf.set(conf);
+       }
+       return conf;
+     }
+ 
+     private Map<String, String> getModifiedConf() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         modifiedConf = new HashMap<>();
+         threadLocalModifiedConfig.set(modifiedConf);
+       }
+       return modifiedConf;
+     }
+ 
+     @Override
+     public Warehouse getWh() {
+       return wh;
+     }
+ 
+     @Override
+     public void setMetaConf(String key, String value) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       try {
+         confVar.validate(value);
+       } catch (IllegalArgumentException e) {
+         throw new MetaException("Invalid configuration value " + value + " for key " + key +
+             " by " + e.getMessage());
+       }
+       Configuration configuration = getConf();
+       String oldValue = MetastoreConf.get(configuration, key);
+       // Save prev val of the key on threadLocal
+       Map<String, String> modifiedConf = getModifiedConf();
+       if (!modifiedConf.containsKey(key)) {
+         modifiedConf.put(key, oldValue);
+       }
+       // Set invoking HMSHandler on threadLocal, this will be used later to notify
+       // metaListeners in HiveMetaStore#cleanupRawStore
+       setHMSHandler(this);
+       configuration.set(key, value);
+       notifyMetaListeners(key, oldValue, value);
+ 
+       if (ConfVars.TRY_DIRECT_SQL == confVar) {
+         HMSHandler.LOG.info("Direct SQL optimization = {}",  value);
+       }
+     }
+ 
+     @Override
+     public String getMetaConf(String key) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       return getConf().get(key, confVar.getDefaultVal().toString());
+     }
+ 
+     /**
+      * Get a cached RawStore.
+      *
+      * @return the cached RawStore
+      * @throws MetaException
+      */
+     @Override
+     public RawStore getMS() throws MetaException {
+       Configuration conf = getConf();
+       return getMSForConf(conf);
+     }
+ 
+     public static RawStore getMSForConf(Configuration conf) throws MetaException {
+       RawStore ms = threadLocalMS.get();
+       if (ms == null) {
+         ms = newRawStoreForConf(conf);
+         ms.verifySchema();
+         threadLocalMS.set(ms);
+         ms = threadLocalMS.get();
+       }
+       return ms;
+     }
+ 
+     @Override
+     public TxnStore getTxnHandler() {
++      return getMsThreadTxnHandler(conf);
++    }
++
++    public static TxnStore getMsThreadTxnHandler(Configuration conf) {
+       TxnStore txn = threadLocalTxn.get();
+       if (txn == null) {
+         txn = TxnUtils.getTxnStore(conf);
+         threadLocalTxn.set(txn);
+       }
+       return txn;
+     }
+ 
+     static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
+       Configuration newConf = new Configuration(conf);
+       String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
+       LOG.info(addPrefix("Opening raw store with implementation class:" + rawStoreClassName));
+       return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
+     }
+ 
+     @VisibleForTesting
+     public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException,
+         InvalidOperationException {
+       try {
+         Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME);
+         // Null check because in some test cases we get a null from ms.getCatalog.
+         if (defaultCat !=null && defaultCat.getLocationUri().equals("TBD")) {
+           // One time update issue.  When the new 'hive' catalog is created in an upgrade the
+           // script does not know the location of the warehouse.  So we need to update it.
+           LOG.info("Setting location of default catalog, as it hasn't been done after upgrade");
+           defaultCat.setLocationUri(wh.getWhRoot().toString());
+           ms.alterCatalog(defaultCat.getName(), defaultCat);
+         }
+ 
+       } catch (NoSuchObjectException e) {
+         Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString());
+         cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT);
+         ms.createCatalog(cat);
+       }
+     }
+ 
+     private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
+       try {
+         ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
+       } catch (NoSuchObjectException e) {
+         Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
+           wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
+         db.setOwnerName(PUBLIC);
+         db.setOwnerType(PrincipalType.ROLE);
+         db.setCatalogName(DEFAULT_CATALOG_NAME);
+         ms.createDatabase(db);
+       }
+     }
+ 
+     /**
+      * create default database if it doesn't exist.
+      *
+      * There is potential contention when a HiveServer2 with an embedded metastore and a
+      * Metastore Server invoke createDefaultDB concurrently. If the first attempt fails with a
+      * JDOException it is retried once; if the retry fails as well, the error is only logged as
+      * a warning, since the failure usually means the other caller succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultDB() throws MetaException {
+       try {
+         RawStore ms = getMS();
+         createDefaultCatalog(ms, wh);
+         createDefaultDB_core(ms);
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+         try {
+           createDefaultDB_core(getMS());
+         } catch (InvalidObjectException e1) {
+           throw new MetaException(e1.getMessage());
+         }
+       } catch (InvalidObjectException|InvalidOperationException e) {
+         throw new MetaException(e.getMessage());
+       }
+     }
+ 
+     /**
+      * create default roles if they don't exist.
+      *
+      * There is potential contention when a HiveServer2 with an embedded metastore and a
+      * Metastore Server invoke createDefaultRoles concurrently. If the first attempt fails with a
+      * JDOException it is retried once; if the retry fails as well, the error is only logged as
+      * a warning, since the failure usually means the other caller succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultRoles() throws MetaException {
+       try {
+         createDefaultRoles_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+         createDefaultRoles_core();
+       }
+     }
+ 
+     private void createDefaultRoles_core() throws MetaException {
+ 
+       RawStore ms = getMS();
+       try {
+         ms.addRole(ADMIN, ADMIN);
+       } catch (InvalidObjectException e) {
+         LOG.debug(ADMIN +" role already exists",e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding " +ADMIN+" roles" , e);
+       }
+       LOG.info("Added "+ ADMIN+ " role in metastore");
+       try {
+         ms.addRole(PUBLIC, PUBLIC);
+       } catch (InvalidObjectException e) {
+         LOG.debug(PUBLIC + " role already exists",e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding "+PUBLIC +" roles" , e);
+       }
+       LOG.info("Added "+PUBLIC+ " role in metastore");
+       // now grant all privs to admin
+       PrivilegeBag privs = new PrivilegeBag();
+       privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null,
+         null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN,
+           PrincipalType.ROLE, true), "SQL"));
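+       // Note: the HiveObjectRef above (GLOBAL type with all other fields null) denotes the
+       // global object, so this grants the "All" privilege to the ADMIN role at global scope,
+       // with grant option.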
+       try {
+         ms.grantPrivileges(privs);
+       } catch (InvalidObjectException e) {
+         // Surprisingly these privs are already granted.
+         LOG.debug("Failed while granting global privs to admin", e);
+       } catch (NoSuchObjectException e) {
+         // Unlikely to be thrown.
+         LOG.warn("Failed while granting global privs to admin", e);
+       }
+     }
+ 
+     /**
+      * Add the configured admin users if they don't exist.
+      *
+      * There is potential contention when a HiveServer2 with an embedded metastore and a
+      * standalone Metastore Server invoke addAdminUsers concurrently. If the call fails with a
+      * JDOException, a warning is logged and the call is retried once, on the assumption that
+      * the other caller has likely already added the users.
+      *
+      * @throws MetaException
+      */
+     private void addAdminUsers() throws MetaException {
+       try {
+         addAdminUsers_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+         addAdminUsers_core();
+       }
+     }
+ 
+     private void addAdminUsers_core() throws MetaException {
+ 
+       // now add pre-configured users to admin role
+       String userStr = MetastoreConf.getVar(conf, ConfVars.USERS_IN_ADMIN_ROLE, "").trim();
+       if (userStr.isEmpty()) {
+         LOG.info("No users are added to the admin role, since the config is empty");
+         return;
+       }
+       // Since user names must be valid Unix user names, which per IEEE Std 1003.1-2001 cannot
+       // contain commas, we can safely split the string above on commas.
+ 
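+       // Illustrative example (the value below is hypothetical, not taken from any real config):
+       // with USERS_IN_ADMIN_ROLE set to "hive, admin1,,admin2 ", the splitter yields
+       // "hive", "admin1" and "admin2", and each of them is granted the ADMIN role further down.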
+       Iterator<String> users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator();
+       if (!users.hasNext()) {
+         LOG.info("No user is added in admin role, since config value "+ userStr +
+           " is in incorrect format. We accept comma separated list of users.");
+         return;
+       }
+       Role adminRole;
+       RawStore ms = getMS();
+       try {
+         adminRole = ms.getRole(ADMIN);
+       } catch (NoSuchObjectException e) {
+         LOG.error("Failed to retrieve just added admin role",e);
+         return;
+       }
+       while (users.hasNext()) {
+         String userName = users.next();
+         try {
+           ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true);
+           LOG.info("Added " + userName + " to admin role");
+         } catch (NoSuchObjectException e) {
+           LOG.error("Failed to add "+ userName + " in admin role",e);
+         } catch (InvalidObjectException e) {
+           LOG.debug(userName + " already in admin role", e);
+         }
+       }
+     }
+ 
+     private static void logInfo(String m) {
+       LOG.info(threadLocalId.get().toString() + ": " + m);
+       logAuditEvent(m);
+     }
+ 
+     private String startFunction(String function, String extraLogInfo) {
+       incrementCounter(function);
+       logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
+           function + extraLogInfo);
+       com.codahale.metrics.Timer timer =
+           Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
+       if (timer != null) {
+         // Timer will be null if we aren't using metrics
+         timerContexts.get().put(function, timer.time());
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.inc();
+       }
+       return function;
+     }
+ 
+     private String startFunction(String function) {
+       return startFunction(function, "");
+     }
+ 
+     private void startTableFunction(String function, String catName, String db, String tbl) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl));
+     }
+ 
+     private void startMultiTableFunction(String function, String db, List<String> tbls) {
+       String tableNames = join(tbls, ",");
+       startFunction(function, " : db=" + db + " tbls=" + tableNames);
+     }
+ 
+     private void startPartitionFunction(String function, String cat, String db, String tbl,
+                                         List<String> partVals) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(cat, db, tbl) + "[" + join(partVals, ",") + "]");
+     }
+ 
+     private void startPartitionFunction(String function, String catName, String db, String tbl,
+                                         Map<String, String> partName) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl) + " partition=" + partName);
+     }
+ 
+     private void endFunction(String function, boolean successful, Exception e) {
+       endFunction(function, successful, e, null);
+     }
+     private void endFunction(String function, boolean successful, Exception e,
+                             String inputTableName) {
+       endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
+     }
+ 
+     private void endFunction(String function, MetaStoreEndFunctionContext context) {
+       com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
+       if (timerContext != null) {
+         timerContext.close();
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.dec();
+       }
+ 
+       for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+         listener.onEndFunction(function, context);
+       }
+     }
+ 
+     @Override
+     public fb_status getStatus() {
+       return fb_status.ALIVE;
+     }
+ 
+     @Override
+     public void shutdown() {
+       cleanupRawStore();
+       PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
+     }
+ 
+     @Override
+     public AbstractMap<String, Long> getCounters() {
+       AbstractMap<String, Long> counters = super.getCounters();
+ 
+       // Allow endFunctionListeners to add any counters they have collected
+       if (endFunctionListeners != null) {
+         for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+           listener.exportCounters(counters);
+         }
+       }
+ 
+       return counters;
+     }
+ 
+     @Override
+     public void create_catalog(CreateCatalogRequest rqst)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       Catalog catalog = rqst.getCatalog();
+       startFunction("create_catalog", ": " + catalog.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         try {
+           getMS().getCatalog(catalog.getName());
+           throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists");
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (!MetaStoreUtils.validateName(catalog.getName(), null)) {
+           throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name");
+         }
+ 
+         if (catalog.getLocationUri() == null) {
+           throw new InvalidObjectException("You must specify a path for the catalog");
+         }
+ 
+         RawStore ms = getMS();
+         Path catPath = new Path(catalog.getLocationUri());
+         boolean madeDir = false;
+         Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+         try {
+           firePreEvent(new PreCreateCatalogEvent(this, catalog));
+           if (!wh.isDir(catPath)) {
+             if (!wh.mkdirs(catPath)) {
+               throw new MetaException("Unable to create catalog path " + catPath +
+                   ", failed to create catalog " + catalog.getName());
+             }
+             madeDir = true;
+           }
+ 
+           ms.openTransaction();
+           ms.createCatalog(catalog);
+ 
+           // Create a default database inside the catalog
+           Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
+                            catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
+           db.setCatalogName(catalog.getName());
+           create_database_core(ms, db);
+ 
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenersResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                     EventType.CREATE_CATALOG,
+                     new CreateCatalogEvent(true, this, catalog));
+           }
+ 
+           success = ms.commitTransaction();
+         } finally {
+           if (!success) {
+             ms.rollbackTransaction();
+             if (madeDir) {
+               wh.deleteDir(catPath, true, false, false);
+             }
+           }
+ 
+           if (!listeners.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners,
+                 EventType.CREATE_CATALOG,
+                 new CreateCatalogEvent(success, this, catalog),
+                 null,
+                 transactionalListenersResponses, ms);
+           }
+         }
+         success = true;
+       } catch (AlreadyExistsException|InvalidObjectException|MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("create_catalog", success, ex);
+       }
+     }
+ 
+     @Override
+     public void alter_catalog(AlterCatalogRequest rqst) throws TException {
+       startFunction("alter_catalog " + rqst.getName());
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       GetCatalogResponse oldCat = null;
+ 
+       try {
+         oldCat = get_catalog(new GetCatalogRequest(rqst.getName()));
+         // Above should have thrown NoSuchObjectException if there is no such catalog
+         assert oldCat != null && oldCat.getCatalog() != null;
+         firePreEvent(new PreAlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), this));
+ 
+         ms.openTransaction();
+         ms.alterCatalog(rqst.getName(), rqst.getNewCat());
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_CATALOG,
+                   new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldCat) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_CATALOG,
+               new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), success, this),
+               null, transactionalListenersResponses, ms);
+         }
+         endFunction("alter_catalog", success, ex);
+       }
+ 
+     }
+ 
+     @Override
+     public GetCatalogResponse get_catalog(GetCatalogRequest rqst)
+         throws NoSuchObjectException, TException {
+       String catName = rqst.getName();
+       startFunction("get_catalog", ": " + catName);
+       Catalog cat = null;
+       Exception ex = null;
+       try {
+         cat = getMS().getCatalog(catName);
+         firePreEvent(new PreReadCatalogEvent(this, cat));
+         return new GetCatalogResponse(cat);
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", cat != null, ex);
+       }
+     }
+ 
+     @Override
+     public GetCatalogsResponse get_catalogs() throws MetaException {
+       startFunction("get_catalogs");
+ 
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getCatalogs();
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_catalog", ret != null, ex);
+       }
+       return new GetCatalogsResponse(ret == null ? Collections.emptyList() : ret);
+ 
+     }
+ 
+     @Override
+     public void drop_catalog(DropCatalogRequest rqst)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       String catName = rqst.getName();
+       startFunction("drop_catalog", ": " + catName);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName)) {
+         endFunction("drop_catalog", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog");
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         dropCatalogCore(catName);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_catalog", success, ex);
+       }
+ 
+     }
+ 
+     private void dropCatalogCore(String catName)
+         throws MetaException, NoSuchObjectException, InvalidOperationException {
+       boolean success = false;
+       Catalog cat = null;
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         cat = ms.getCatalog(catName);
+ 
+         firePreEvent(new PreDropCatalogEvent(this, cat));
+ 
+         List<String> allDbs = get_databases(prependNotNullCatToDbName(catName, null));
+         if (allDbs != null && !allDbs.isEmpty()) {
+           // It might just be the default, in which case we can drop that one if it's empty
+           if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) {
+             try {
+               drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false);
+             } catch (InvalidOperationException e) {
+               // This means there are still tables or other objects in the database
+               throw new InvalidOperationException("There are still objects in the default " +
+                   "database for catalog " + catName);
+             } catch (InvalidObjectException|IOException|InvalidInputException e) {
+               MetaException me = new MetaException("Error attempting to drop the default database for " +
+                   "catalog " + catName);
+               me.initCause(e);
+               throw me;
+             }
+           } else {
+             throw new InvalidOperationException("There are non-default databases in the catalog " +
+                 catName + " so it cannot be dropped.");
+           }
+         }
+ 
+         ms.dropCatalog(catName);
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.DROP_CATALOG,
+                   new DropCatalogEvent(true, this, cat));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (success) {
+           wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false, false);
+         } else {
+           ms.rollbackTransaction();
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.DROP_CATALOG,
+               new DropCatalogEvent(success, this, cat),
+               null,
+               transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+ 
+     // Assumes that the catalog has already been set.
+     private void create_database_core(RawStore ms, final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       if (!MetaStoreUtils.validateName(db.getName(), null)) {
+         throw new InvalidObjectException(db.getName() + " is not a valid database name");
+       }
+ 
+       Catalog cat = null;
+       try {
+         cat = getMS().getCatalog(db.getCatalogName());
+       } catch (NoSuchObjectException e) {
+         LOG.error("No such catalog " + db.getCatalogName());
+         throw new InvalidObjectException("No such catalog " + db.getCatalogName());
+       }
+       Path dbPath = wh.determineDatabasePath(cat, db);
+       db.setLocationUri(dbPath.toString());
+ 
+       boolean success = false;
+       boolean madeDir = false;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       try {
+         firePreEvent(new PreCreateDatabaseEvent(db, this));
+         if (!wh.isDir(dbPath)) {
+           LOG.debug("Creating database path " + dbPath);
+           if (!wh.mkdirs(dbPath)) {
+             throw new MetaException("Unable to create database path " + dbPath +
+                 ", failed to create database " + db.getName());
+           }
+           madeDir = true;
+         }
+ 
+         ms.openTransaction();
+         ms.createDatabase(db);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                     EventType.CREATE_DATABASE,
+                                                     new CreateDatabaseEvent(db, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(dbPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.CREATE_DATABASE,
+                                                 new CreateDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenersResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void create_database(final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       startFunction("create_database", ": " + db.toString());
+       boolean success = false;
+       Exception ex = null;
+       if (!db.isSetCatalogName()) {
+         db.setCatalogName(getDefaultCatalog(conf));
+       }
+       try {
+         try {
+           if (null != get_database_core(db.getCatalogName(), db.getName())) {
+             throw new AlreadyExistsException("Database " + db.getName() + " already exists");
+           }
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (TEST_TIMEOUT_ENABLED) {
+           try {
+             Thread.sleep(TEST_TIMEOUT_VALUE);
+           } catch (InterruptedException e) {
+             // do nothing
+           }
+           Deadline.checkTimeout();
+         }
+         create_database_core(getMS(), db);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_database", success, ex);
+       }
+     }
+ 
+     @Override
+     public Database get_database(final String name) throws NoSuchObjectException, MetaException {
+       startFunction("get_database", ": " + name);
+       Database db = null;
+       Exception ex = null;
+       try {
+         String[] parsedDbName = parseDbName(name, conf);
+         db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         firePreEvent(new PreReadDatabaseEvent(db, this));
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", db != null, ex);
+       }
+       return db;
+     }
+ 
+     @Override
+     public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException {
+       Database db = null;
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         db = getMS().getDatabase(catName, name);
+       } catch (MetaException | NoSuchObjectException e) {
+         throw e;
+       } catch (Exception e) {
+         assert (e instanceof RuntimeException);
+         throw (RuntimeException) e;
+       }
+       return db;
+     }
+ 
+     @Override
+     public void alter_database(final String dbName, final Database newDB) throws TException {
+       startFunction("alter_database " + dbName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Database oldDB = null;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+ 
+       // Perform the same URI normalization as create_database_core.
+       if (newDB.getLocationUri() != null) {
+         newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
+       }
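+       // Illustrative note (behavior of Warehouse.getDnsPath assumed, not shown in this patch):
+       // the normalization is expected to turn a bare path such as "/warehouse/mydb" into a
+       // fully qualified URI like "hdfs://<namenode>/warehouse/mydb" before it is persisted.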
+ 
+       String[] parsedDbName = parseDbName(dbName, conf);
+ 
+       try {
+         oldDB = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         if (oldDB == null) {
+           throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] +
+               "\". Could not retrieve old definition.");
+         }
+         firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
+ 
+         ms.openTransaction();
+         ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_DATABASE,
+                   new AlterDatabaseEvent(oldDB, newDB, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldDB) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_DATABASE,
+               new AlterDatabaseEvent(oldDB, newDB, success, this),
+               null,
+               transactionalListenersResponses, ms);
+         }
+         endFunction("alter_database", success, ex);
+       }
+     }
+ 
+     private void drop_database_core(RawStore ms, String catName,
+         final String name, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException,
+         IOException, InvalidObjectException, InvalidInputException {
+       boolean success = false;
+       Database db = null;
+       List<Path> tablePaths = new ArrayList<>();
+       List<Path> partitionPaths = new ArrayList<>();
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         ms.openTransaction();
+         db = ms.getDatabase(catName, name);
+ 
+         if (!isInTest && ReplChangeManager.isSourceOfReplication(db)) {
+           throw new InvalidOperationException("can not drop a database which is a source of replication");
+         }
+ 
+         firePreEvent(new PreDropDatabaseEvent(db, this));
+         String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf);
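+         // catPrependedName packs the catalog and database into the single name string expected
+         // by the one-argument APIs used below (get_all_tables, get_functions, drop_function);
+         // those APIs are expected to split the two parts back out via parseDbName.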
+ 
+         Set<String> uniqueTableNames = new HashSet<>(get_all_tables(catPrependedName));
+         List<String> allFunctions = get_functions(catPrependedName, "*");
+ 
+         if (!cascade) {
+           if (!uniqueTableNames.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more tables exist.");
+           }
+           if (!allFunctions.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more functions exist.");
+           }
+         }
+         Path path = new Path(db.getLocationUri()).getParent();
+         if (!wh.isWritable(path)) {
+           throw new MetaException("Database not dropped since " +
+               path + " is not writable by " +
+               SecurityUtils.getUser());
+         }
+ 
+         Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
+ 
+         // drop any functions before dropping db
+         for (String funcName : allFunctions) {
+           drop_function(catPrependedName, funcName);
+         }
+ 
+         final int tableBatchSize = MetastoreConf.getIntVar(conf,
+             ConfVars.BATCH_RETRIEVE_MAX);
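+         // Batching sketch (the batch size below is illustrative): with BATCH_RETRIEVE_MAX = 300
+         // and 1,000 tables, the loops below fetch sublists [0, 300), [300, 600), [600, 900)
+         // and [900, 1000) instead of materializing every table object at once.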
+ 
+         // First pass will drop the materialized views
+         List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString());
+         int startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < materializedViewNames.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, materializedViewNames.size());
+ 
+           List<Table> materializedViews;
+           try {
+             materializedViews = ms.getTableObjectsByName(catName, name, materializedViewNames.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (materializedViews != null && !materializedViews.isEmpty()) {
+             for (Table materializedView : materializedViews) {
+               if (materializedView.getSd().getLocation() != null) {
+                 Path materializedViewPath = wh.getDnsPath(new Path(materializedView.getSd().getLocation()));
+                 if (!wh.isWritable(materializedViewPath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       materializedView.getTableName() + " has a parent location " + materializedViewPath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(),
+                     materializedViewPath.toString())) {
+                   tablePaths.add(materializedViewPath);
+                 }
+               }
+               // Drop the materialized view but not its data
+               drop_table(name, materializedView.getTableName(), false);
+               // Remove from all tables
+               uniqueTableNames.remove(materializedView.getTableName());
+             }
+           }
+           startIndex = endIndex;
+         }
+ 
+         // drop tables before dropping db
+         List<String> allTables = new ArrayList<>(uniqueTableNames);
+         startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < allTables.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
+ 
+           List<Table> tables;
+           try {
+             tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (tables != null && !tables.isEmpty()) {
+             for (Table table : tables) {
+ 
+               // If the table data should be deleted and its location might not be a
+               // subdirectory of the database, add its location to the list of paths to delete
+               Path tablePath = null;
+               boolean tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(table, deleteData);
+               if (table.getSd().getLocation() != null && tableDataShouldBeDeleted) {
+                 tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
+                 if (!wh.isWritable(tablePath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       table.getTableName() + " has a parent location " + tablePath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(), tablePath.toString())) {
+                   tablePaths.add(tablePath);
+                 }
+               }
+ 
+               // For each partition in each table, drop the partitions and get a list of
+               // partitions' locations which might need to be deleted
+               partitionPaths = dropPartitionsAndGetLocations(ms, catName, name, table.getTableName(),
+                   tablePath, tableDataShouldBeDeleted);
+ 
+               // Drop the table but not its data
+               drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf),
+                   table.getTableName(), false);
+             }
+           }
+ 
+           startIndex = endIndex;
+         }
+ 
+         if (ms.dropDatabase(catName, name)) {
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenerResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                       EventType.DROP_DATABASE,
+                                                       new DropDatabaseEvent(db, true, this));
+           }
+ 
+           success = ms.commitTransaction();
+         }
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         } else if (deleteData) {
+           // Delete the data in the partitions which have other locations
+           deletePartitionData(partitionPaths, false, db);
+           // Delete the data in the tables which have other locations
+           for (Path tablePath : tablePaths) {
+             deleteTableData(tablePath, false, db);
+           }
+           // Delete the data in the database
+           try {
+             wh.deleteDir(new Path(db.getLocationUri()), true, db);
+           } catch (Exception e) {
+             LOG.error("Failed to delete database directory: " + db.getLocationUri() +
+                 " " + e.getMessage());
+           }
+           // it is not a terrible thing even if the data is not deleted
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.DROP_DATABASE,
+                                                 new DropDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       startFunction("drop_database", ": " + dbName);
+       String[] parsedDbName = parseDbName(dbName, conf);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) &&
+           DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) {
+         endFunction("drop_database", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog "
+             + DEFAULT_CATALOG_NAME);
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData,
+             cascade);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_database", success, ex);
+       }
+     }
+ 
+ 
+     @Override
+     public List<String> get_databases(final String pattern) throws MetaException {
+       startFunction("get_databases", ": " + pattern);
+ 
+       String[] parsedDbNamed = parseDbName(pattern, conf);
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         if (parsedDbNamed[DB_NAME] == null) {
+           ret = getMS().getAllDatabases(parsedDbNamed[CAT_NAME]);
+         } else {
+           ret = getMS().getDatabases(parsedDbNamed[CAT_NAME], parsedDbNamed[DB_NAME]);
+         }
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("get_databases", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     @Override
+     public List<String> get_all_databases() throws MetaException {
+       return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf));
+     }
+ 
+     private void create_type_core(final RawStore ms, final Type type)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       if (!MetaStoreUtils.validateName(type.getName(), null)) {
+         throw new InvalidObjectException("Invalid type name");
+       }
+ 
+       boolean success = false;
+       try {
+         ms.openTransaction();
+         if (is_type_exists(ms, type.getName())) {
+           throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+         }
+         ms.createType(type);
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+       }
+     }
+ 
+     @Override
+     public boolean create_type(final Type type) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       startFunction("create_type", ": " + type.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_type_core(getMS(), type);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_type", success, ex);
+       }
+ 
+       return success;
+     }
+ 
+     @Override
+     public Type get_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("get_type", ": " + name);
+ 
+       Type ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getType(name);
+         if (null == ret) {
+           throw new NoSuchObjectException("Type \"" + name + "\" not found.");
+         }
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("get_type", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     private boolean is_type_exists(RawStore ms, String typeName)
+         throws MetaException {
+       return (ms.getType(typeName) != null);
+     }
+ 
+     @Override
+     public boolean drop_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("drop_type", ": " + name);
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         // TODO:pc validate that there are no types that refer to this
+         success = getMS().dropType(name);
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("drop_type", success, ex);
+       }
+       return success;
+     }
+ 
+     @Override
+     public Map<String, Type> get_type_all(String name) throws MetaException {
+       // TODO Auto-generated method stub
+       startFunction("get_type_all", ": " + name);
+       endFunction("get_type_all", false, null);
+       throw new MetaException("Not yet implemented");
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext)
+             throws AlreadyExistsException, MetaException,
+             InvalidObjectException, NoSuchObjectException {
+       create_table_core(ms, tbl, envContext, null, null, null, null, null, null);
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext, List<SQLPrimaryKey> primaryKeys,
+         List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
+                                    List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException,
+         InvalidObjectException, NoSuchObjectException {
+       // To preserve backward compatibility throw MetaException in case of null database
+       if (tbl.getDbName() == null) {
+         throw new MetaException("Null database name is not allowed");
+       }
+ 
+       if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) {
+         throw new InvalidObjectException(tbl.getTableName()
+             + " is not a valid object name");
+       }
+       String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols());
+       if (validate != null) {
+         throw new InvalidObjectException("Invalid column " + validate);
+       }
+       if (tbl.getPartitionKeys() != null) {
+         validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid partition column " + validate);
+         }
+       }
+       SkewedInfo skew = tbl.getSd().getSkewedInfo();
+       if (skew != null) {
+         validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+         validate = MetaStoreUtils.validateSkewedColNamesSubsetCol(
+             skew.getSkewedColNames(), tbl.getSd().getCols());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+       }
+ 
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       Path tblPath = null;
+       boolean success = false, madeDir = false;
+       Database db = null;
+       try {
+         if (!tbl.isSetCatName()) {
+           tbl.setCatName(getDefaultCatalog(conf));
+         }
+         firePreEvent(new PreCreateTableEvent(tbl, this));
+ 
+         ms.openTransaction();
+ 
+         db = ms.getDatabase(tbl.getCatName(), tbl.getDbName());
+ 
+         // get_table checks whether database exists, it should be moved here
+         if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) {
+           throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl)
+               + " already exists");
+         }
+ 
+         if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
+           if (tbl.getSd().getLocation() == null
+               || tbl.getSd().getLocation().isEmpty()) {
+             tblPath = wh.getDefaultTablePath(db, tbl);
+           } else {
+             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
+               LOG.warn("Location: " + tbl.getSd().getLocation()
+                   + " specified for non-external table:" + tbl.getTableName());
+             }
+             tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+           }
+           tbl.getSd().setLocation(tblPath.toString());
+         }
+ 
+         if (tblPath != null) {
+           if (!wh.isDir(tblPath)) {
+             if (!wh.mkdirs(tblPath)) {
+               throw new MetaException(tblPath
+                   + " is not a directory or unable to create one");
+             }
+             madeDir = true;
+           }
+         }
+         if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
+             !MetaStoreUtils.isView(tbl)) {
+           MetaStoreUtils.updateTableStatsSlow(db, tbl, wh, madeDir, false, envContext);
+         }
+ 
+         // set create time
+         long time = System.currentTimeMillis() / 1000;
+         tbl.setCreateTime((int) time);
+         if (tbl.getParameters() == null ||
+             tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
+           tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+         }
+ 
+         if (primaryKeys == null && foreignKeys == null
+                 && uniqueConstraints == null && notNullConstraints == null && defaultConstraints == null
+             && checkConstraints == null) {
+           ms.createTable(tbl);
+         } else {
+           // Check that constraints have catalog name properly set first
+           if (primaryKeys != null && !primaryKeys.isEmpty() && !primaryKeys.get(0).isSetCatName()) {
+             for (SQLPrimaryKey pkcol : primaryKeys) pkcol.setCatName(tbl.getCatName());
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty() && !foreignKeys.get(0).isSetCatName()) {
+             for (SQLForeignKey fkcol : foreignKeys) fkcol.setCatName(tbl.getCatName());
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty() && !uniqueConstraints.get(0).isSetCatName()) {
+             for (SQLUniqueConstraint uccol : uniqueConstraints) uccol.setCatName(tbl.getCatName());
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty() && !notNullConstraints.get(0).isSetCatName()) {
+             for (SQLNotNullConstraint nncol : notNullConstraints) nncol.setCatName(tbl.getCatName());
+           }
+           if (defaultConstraints != null && !defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+             for (SQLDefaultConstraint dccol : defaultConstraints) dccol.setCatName(tbl.getCatName());
+           }
+           if (checkConstraints != null && !checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+             for (SQLCheckConstraint cccol : checkConstraints) cccol.setCatName(tbl.getCatName());
+           }
+           // Set constraint name if null before sending to listener
+           List<String> constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys,
+               uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
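+           // Naming sketch (ordering of constraintNames assumed from the offset arithmetic below):
+           // the generated names are expected to be laid out as PKs, then FKs, then unique,
+           // not-null, default and check constraints. For example, with 1 PK and 2 FKs the first
+           // unique constraint name would sit at index primaryKeySize + foreignKeySize = 3.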
+           int primaryKeySize = 0;
+           if (primaryKeys != null) {
+             primaryKeySize = primaryKeys.size();
+             for (int i = 0; i < primaryKeys.size(); i++) {
+               if (primaryKeys.get(i).getPk_name() == null) {
+                 primaryKeys.get(i).setPk_name(constraintNames.get(i));
+               }
+               if (!primaryKeys.get(i).isSetCatName()) primaryKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int foreignKeySize = 0;
+           if (foreignKeys != null) {
+             foreignKeySize = foreignKeys.size();
+             for (int i = 0; i < foreignKeySize; i++) {
+               if (foreignKeys.get(i).getFk_name() == null) {
+                 foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i));
+               }
+               if (!foreignKeys.get(i).isSetCatName()) foreignKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int uniqueConstraintSize = 0;
+           if (uniqueConstraints != null) {
+             uniqueConstraintSize = uniqueConstraints.size();
+             for (int i = 0; i < uniqueConstraintSize; i++) {
+               if (uniqueConstraints.get(i).getUk_name() == null) {
+                 uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i));
+               }
+               if (!uniqueConstraints.get(i).isSetCatName()) uniqueConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int notNullConstraintSize = 0;
+           if (notNullConstraints != null) {
+             notNullConstraintSize = notNullConstraints.size();
+             for (int i = 0; i < notNullConstraints.size(); i++) {
+               if (notNullConstraints.get(i).getNn_name() == null) {
+                 notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i));
+               }
+               if (!notNullConstraints.get(i).isSetCatName()) notNullConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int defaultConstraintSize = 0;
+           if (defaultConstraints != null) {
+             defaultConstraintSize = defaultConstraints.size();
+             for (int i = 0; i < defaultConstraints.size(); i++) {
+               if (defaultConstraints.get(i).getDc_name() == null) {
+                 defaultConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                     + uniqueConstraintSize + notNullConstraintSize + i));
+               }
+               if (!defaultConstraints.get(i).isSetCatName()) defaultConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           if (checkConstraints != null) {
+             for (int i = 0; i < checkConstraints.size(); i++) {
+               if (checkConstraints.get(i).getDc_name() == null) {
+                 checkConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                                                                              + uniqueConstraintSize
+                                                                              + defaultConstraintSize
+                                                                            + notNullConstraintSize + i));
+               }
+               if (!checkConstraints.get(i).isSetCatName()) checkConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+         }
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+               EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, true, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, true, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext);
+           }
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(tblPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE,
+               new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, success, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, success, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext);
+           }
+         }
+       }
+     }
+ 
+     @Override
+     public void create_table(final Table tbl) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       create_table_with_environment_context(tbl, null);
+     }
+ 
+     @Override
+     public void create_table_with_environment_context(final Table tbl,
+         final EnvironmentContext envContext)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, envContext);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         LOG.warn("create_table_with_environment_context got ", e);
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
+     @Override
+     public void create_table_with_constraints(final Table tbl,
+         final List<SQLPrimaryKey> primaryKeys, final List<SQLForeignKey> foreignKeys,
+         List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints,
+         List<SQLDefaultConstraint> defaultConstraints,
+         List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys,
+             uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
+     @Override
+     public void drop_constraint(DropConstraintRequest req)
+         throws MetaException, InvalidObjectException {
+       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+       String dbName = req.getDbname();
+       String tableName = req.getTablename();
+       String constraintName = req.getConstraintname();
+       startFunction("drop_constraint", ": " + constraintName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         ms.dropConstraint(catName, dbName, tableName, constraintName);
+         if (transactionalListeners.size() > 0) {
+           DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName,
+               tableName, constraintName, true, this);
+           for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+             transactionalListener.onDropConstraint(dropConstraintEvent);
+           }
+         }
+         success = ms.commitTransaction();
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException e)

<TRUNCATED>

[14/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/resources/package.jdo
index 0000000,5fb548c..70150da
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-server/src/main/resources/package.jdo
@@@ -1,0 -1,1420 +1,1426 @@@
+ <?xml version="1.0"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ --> 
+ <!DOCTYPE jdo PUBLIC "-//Sun Microsystems, Inc.//DTD Java Data Objects Metadata 2.0//EN"
+   "http://java.sun.com/dtd/jdo_2_0.dtd">
+ <!--
+   Size Limitations:
+ 
+   Indexed VARCHAR: 767 bytes (MySQL running on InnoDB Engine http://bugs.mysql.com/bug.php?id=13315)
+   Non-indexed VARCHAR: 4000 bytes (max length on Oracle 9i/10g/11g)
+ 
+ -->
+ <jdo>
+   <package name="org.apache.hadoop.hive.metastore.model">
+     <class name="MDatabase" identity-type="datastore" table="DBS" detachable="true">  
+       <datastore-identity>
+         <column name="DB_ID"/>
+       </datastore-identity>
+       <index name="UniqueDatabase" unique="true">
+         <column name="NAME"/>
+         <column name="CTLG_NAME"/>
+       </index>
+       <field name="name">  
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="catalogName">
+         <column name="CTLG_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="DB_LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="parameters" table="DATABASE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="DB_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="180" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="ownerName">    
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+        <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MCatalog" identity-type="datastore" table="CTLGS" detachable="true">
+       <datastore-identity>
+         <column name="CTLG_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="256" jdbc-type="VARCHAR"/>
+         <index name="UniqueCatalog" unique="true"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MFieldSchema" embedded-only="true" table="TYPE_FIELDS" detachable="true">
+       <field name="name">
+         <column name="FNAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type" >
+         <column name="FTYPE" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="comment" >
+         <column name="FCOMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MType" table="TYPES" detachable="true">  
+       <field name="name" >  
+         <column name="TYPE_NAME" length="128" jdbc-type="VARCHAR"/>  
+         <index name="UniqueType" unique="true"/>
+       </field>
+       <field name="type1">  
+         <column name="TYPE1" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type2">  
+         <column name="TYPE2" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="fields" table="TYPE_FIELDS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="TYPE_FIELDS_PK">
+             <column name="TYPE_NAME"/>
+             <column name="FIELD_NAME"/>
+           </primary-key>
+           <column name="TYPE_NAME"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="FIELD_NAME" length="128" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="type">
+               <column name="FIELD_TYPE" length="767" jdbc-type="VARCHAR"  allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MTable" table="TBLS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="TBL_ID"/>
+       </datastore-identity>
+       <index name="UniqueTable" unique="true">
+         <column name="TBL_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="partitionKeys" table="PARTITION_KEYS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="PARTITION_KEY_PK">
+             <column name="TBL_ID"/>
+             <column name="PKEY_NAME"/>
+           </primary-key>
+           <column name="TBL_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="PKEY_NAME" length="128" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="type">
+               <column name="PKEY_TYPE" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="PKEY_COMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="owner">
+         <column name="OWNER" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="retention">
+         <column name="RETENTION" jdbc-type="integer"/>
+       </field>
+       <field name="parameters" table="TABLE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="TBL_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="viewOriginalText" default-fetch-group="false">
+         <column name="VIEW_ORIGINAL_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="viewExpandedText" default-fetch-group="false">
+         <column name="VIEW_EXPANDED_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="rewriteEnabled">
+         <column name="IS_REWRITE_ENABLED"/>
+       </field>
+       <field name="tableType">
+         <column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+ 
+     <class name="MCreationMetadata" identity-type="datastore" table="MV_CREATION_METADATA" detachable="true">
+       <datastore-identity>
+         <column name="MV_CREATION_METADATA_ID"/>
+       </datastore-identity>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tables" table="MV_TABLES_USED">
+         <collection element-type="MTable"/>
+         <join>
+           <column name="MV_CREATION_METADATA_ID"/>
+         </join>
+         <element column="TBL_ID"/>
+       </field>
+       <field name="txnList">
+         <column name="TXN_LIST" jdbc-type="CLOB" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MConstraint" identity-type="application" table="KEY_CONSTRAINTS" detachable="true" objectid-class="MConstraint$PK">
+       <field name="constraintName" primary-key="true">
+         <column name="CONSTRAINT_NAME"/>
+       </field>
+       <field name="position" primary-key="true">
+         <column name="POSITION"/>
+       </field>
+       <field name="childColumn">
+         <column name="CHILD_CD_ID"/>
+       </field>
+       <field name="childIntegerIndex">
+         <column name="CHILD_INTEGER_IDX"/>
+       </field>
+       <field name="childTable">
+         <column name="CHILD_TBL_ID"/>
+       </field>
+       <field name="parentColumn">
+         <column name="PARENT_CD_ID"/>
+       </field>
+       <field name="parentIntegerIndex">
+     <column name="PARENT_INTEGER_IDX"/>
+       </field>
+       <field name="parentTable">
+         <column name="PARENT_TBL_ID"/>
+       </field>
+       <field name="constraintType">
+         <column name="CONSTRAINT_TYPE"/>
+       </field>
+       <field name="deleteRule">
+         <column name="DELETE_RULE"/>
+       </field>
+       <field name="updateRule">
+         <column name="UPDATE_RULE"/>
+       </field>
+       <field name="enableValidateRely">
+         <column name="ENABLE_VALIDATE_RELY"/>
+       </field>
+     </class>
+ 
+     <class name="MSerDeInfo" identity-type="datastore" table="SERDES" detachable="true">
+       <datastore-identity>
+         <column name="SERDE_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="serializationLib">
+         <column name="SLIB" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="parameters" table="SERDE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SERDE_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serializerClass">
+         <column name="SERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="deserializerClass">
+         <column name="DESERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serdeType">
+         <column name="SERDE_TYPE" jdbc-type="integer" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MOrder" embedded-only="true" table="SORT_ORDER" detachable="true">
+       <field name="col">
+         <column name="COL_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="order">
+         <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MColumnDescriptor" identity-type="datastore" table="CDS" detachable="true">
+       <datastore-identity>
+         <column name="CD_ID"/>
+       </datastore-identity>
+       <field name="cols" table="COLUMNS_V2" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="COLUMNS_PK">
+             <column name="COLUMN_NAME"/>
+           </primary-key>
+           <column name="CD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="type">
+               <column name="TYPE_NAME" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment">
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+    <class name="MStringList" identity-type="datastore" table="Skewed_STRING_LIST" detachable="true">
+      <datastore-identity>
+        <column name="STRING_LIST_ID"/>
+      </datastore-identity>
+      <field name="internalList" table="Skewed_STRING_LIST_VALUES">
+        <collection element-type="java.lang.String"/>
+        <join>
+          <column name="STRING_LIST_ID"/>
+        </join>
+        <element column="STRING_LIST_VALUE"/>
+      </field>
+    </class>
+ 
+     <class name="MStorageDescriptor" identity-type="datastore" table="SDS" detachable="true">
+       <datastore-identity>
+         <column name="SD_ID"/>
+       </datastore-identity>
+       <field name="cd">
+           <column name="CD_ID"/>
+       </field>
+       <field name="location">
+         <column name="LOCATION" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="inputFormat">
+         <column name="INPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="outputFormat">
+         <column name="OUTPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="isCompressed">
+         <column name="IS_COMPRESSED"/>
+       </field>
+       <field name="isStoredAsSubDirectories">
+         <column name="IS_STOREDASSUBDIRECTORIES"/>
+       </field>
+       <field name="numBuckets">
+         <column name="NUM_BUCKETS" jdbc-type="integer"/>
+       </field>
+       <field name="serDeInfo" dependent="true">
+         <column name="SERDE_ID"/>
+       </field>
+       <field name="bucketCols" table="BUCKETING_COLS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="BUCKET_COL_NAME"/>
+       </field>
+       <field name="sortCols" table="SORT_COLS">
+         <collection element-type="MOrder"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="col">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="order">
+               <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="parameters" table="SD_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="skewedColNames" table="SKEWED_COL_NAMES">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="SKEWED_COL_NAME"/>
+       </field>
+       <field name="skewedColValues" table="SKEWED_VALUES">
+         <collection element-type="MStringList"/>
+         <join/>
+       </field>
+       <field name="skewedColValueLocationMaps" table="SKEWED_COL_VALUE_LOC_MAP">
+         <map key-type="MStringList" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <value>
+           <column name="location" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MPartition" table="PARTITIONS" identity-type="datastore" detachable="true">
+       <index name="UniquePartition" unique="true">
+         <column name="PART_NAME"/>
+         <column name="TBL_ID"/>
+       </index>
+       <datastore-identity>
+         <column name="PART_ID"/>
+       </datastore-identity>
+       <field name="partitionName">
+         <column name="PART_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="values" table="PARTITION_KEY_VALS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <element column="PART_KEY_VAL"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="PARTITION_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+     
+     <class name="MIndex" table="IDXS" identity-type="datastore" detachable="true">
+       <index name="UniqueINDEX" unique="true">
+         <column name="INDEX_NAME"/>
+         <column name="ORIG_TBL_ID"/>
+       </index>
+       
+       <datastore-identity>
+         <column name="INDEX_ID"/>
+       </datastore-identity>
+       <field name="indexName">
+         <column name="INDEX_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="origTable">
+         <column name="ORIG_TBL_ID"/>
+       </field>
+       <field name="indexTable">
+         <column name="INDEX_TBL_ID"/>
+       </field>
+       <field name="indexHandlerClass">
+         <column name="INDEX_HANDLER_CLASS" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="deferredRebuild">
+         <column name="DEFERRED_REBUILD"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="INDEX_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="INDEX_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MRole" table="ROLES" identity-type="datastore" detachable="true">
+ 
+       <index name="RoleEntityINDEX" unique="true">
+         <column name="ROLE_NAME"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_ID"/>
+       </datastore-identity>
+ 
+       <field name="roleName">
+         <column name="ROLE_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="ownerName">
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       
+     </class>
+ 
+     <class name="MRoleMap" table="ROLE_MAP" identity-type="datastore" detachable="true">
+       <index name="UserRoleMapINDEX" unique="true">
+         <column name="PRINCIPAL_NAME"/>
+         <column name="ROLE_ID"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="role">
+         <column name="ROLE_ID" />
+       </field>
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="addTime">
+         <column name="ADD_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+     </class>
+ 
+     <class name="MGlobalPrivilege" table="GLOBAL_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="GlobalPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="USER_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+     
+       <datastore-identity>
+         <column name="USER_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="USER_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MDBPrivilege" table="DB_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="DBPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="DB_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="DB_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="DB_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID" />
+       </field>
+       <field name="privilege">
+         <column name="DB_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTablePrivilege" table="TBL_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="TablePrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionPrivilege" table="PART_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="PartPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="privilege">
+         <column name="PART_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTableColumnPrivilege" table="TBL_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+      <index name="TableColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnPrivilege" table="PART_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+      <index name="PartitionColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="PART_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionEvent"  table="PARTITION_EVENTS" identity-type="datastore" detachable="true">  
+        
+       <index name="PartitionEventIndex" unique="false">
+         <column name="PARTITION_NAME"/>
+       </index>
+       
+       <datastore-identity>
+         <column name="PART_NAME_ID"/>
+       </datastore-identity>
+       
+       <field name="catalogName">  
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">  
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+        <field name="partName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE"  jdbc-type="integer"/>
+       </field>
+      <field name="eventTime">
+         <column name="EVENT_TIME"  jdbc-type="BIGINT"/>
+       </field>
+ 
+     </class>
+     
+     <class name="MMasterKey" table="MASTER_KEYS" identity-type="application" detachable="true">
+ 
+       <field name="keyId" primary-key="true" value-strategy="native">
+         <column name="KEY_ID" jdbc-type="integer" />
+       </field>
+         
+       <field name="masterKey">
+         <column name="MASTER_KEY" length="767" jdbc-type="VARCHAR" />
+       </field>  
+       
+     </class>
+ 
+     <class name="MDelegationToken" table="DELEGATION_TOKENS" identity-type="application" detachable="true">
+ 
+       <field name="tokenIdentifier" primary-key="true">
+         <column name="TOKEN_IDENT" length="767" jdbc-type="VARCHAR" />
+       </field>  
+ 
+       <field name="tokenStr">
+         <column name="TOKEN" length="767" jdbc-type="VARCHAR" />
+       </field>
+             
+     </class>    
+ 
+     <class name="MTableColumnStatistics" table="TAB_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name ="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE"  jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+        <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE"  jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE"  jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnStatistics" table="PART_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name ="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partitionName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE"  jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+        <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE"  jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE"  jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+     <class name="MVersionTable" table="VERSION" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="VER_ID"/>
+       </datastore-identity>
+       <field name ="schemaVersion">
+         <column name="SCHEMA_VERSION" length="127" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="versionComment">
+         <column name="VERSION_COMMENT" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MMetastoreDBProperties" table="METASTORE_DB_PROPERTIES" identity-type="application" detachable="true">
+       <field name ="propertyKey" primary-key="true">
+         <column name="PROPERTY_KEY" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="propertyValue">
+         <column name="PROPERTY_VALUE" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name ="description">
+         <column name="DESCRIPTION" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MResourceUri" embedded-only="true" table="RESOURCE_URI" detachable="true">
+       <field name="resourceType">
+         <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+       </field>
+       <field name="uri">
+         <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MFunction" table="FUNCS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="FUNC_ID"/>
+       </datastore-identity>
+       <index name="UniqueFunction" unique="true">
+         <column name="FUNC_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="functionName">
+         <column name="FUNC_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="functionType">
+         <column name="FUNC_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="className">
+         <column name="CLASS_NAME" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerName">    
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+        <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="resourceUris" table="FUNC_RU">
+         <collection element-type="MResourceUri"/>
+         <join>
+           <column name="FUNC_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="resourceType">
+               <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+             </field>
+             <field name="uri">
+               <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MNotificationLog" table="NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment"/>
+       <datastore-identity key-cache-size="1"/>
+       <datastore-identity>
+         <column name="NL_ID"/>
+       </datastore-identity>
+       <field name="eventId">
+         <column name="EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="eventTime">
+         <column name="EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE" length="32" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="message">
+         <column name="MESSAGE" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="messageFormat">
+         <column name="MESSAGE_FORMAT" length="16" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <!-- I tried to use a sequence here but derby didn't handle it well. -->
+     <class name="MNotificationNextId" table="NOTIFICATION_SEQUENCE" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="NNI_ID"/>
+       </datastore-identity>
+       <field name="nextEventId">
+         <column name="NEXT_EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MTxnWriteNotificationLog" table="TXN_WRITE_NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment"/>
+       <datastore-identity key-cache-size="1"/>
+       <datastore-identity>
+         <column name="WNL_ID"/>
+       </datastore-identity>
+       <field name="txnId">
+         <column name="WNL_TXNID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="writeId">
+         <column name="WNL_WRITEID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="database">
+         <column name="WNL_DATABASE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="WNL_TABLE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="WNL_PARTITION" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableObject">
+         <column name="WNL_TABLE_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="partObject">
+         <column name="WNL_PARTITION_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="files">
+         <column name="WNL_FILES" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="eventTime">
+         <column name="WNL_EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MWMResourcePlan" identity-type="datastore" table="WM_RESOURCEPLAN" detachable="true">
+       <datastore-identity>
+         <column name="RP_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="status">
+         <column name="STATUS" jdbc-type="string" allows-null="false"/>
+       </field>
+       <field name="defaultPool">
+         <column name="DEFAULT_POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueResourcePlan" unique="true">
+         <column name="NAME"/>
+       </index>
+ 
+       <field name="pools" mapped-by="resourcePlan">
+         <collection element-type="MWMPool"/>
+       </field>
+       <field name="triggers" mapped-by="resourcePlan">
+         <collection element-type="MWMTrigger"/>
+       </field>
+       <field name="mappings" mapped-by="resourcePlan">
+         <collection element-type="MWMMapping"/>
+       </field>
+     </class>
+ 
+     <class name="MWMPool" identity-type="datastore" table="WM_POOL" detachable="true">
+       <datastore-identity>
+         <column name="POOL_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="path">
+         <column name="PATH" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="allocFraction">
+         <column name="ALLOC_FRACTION" jdbc-type="double" allows-null="true"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="schedulingPolicy">
+         <column name="SCHEDULING_POLICY" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="triggers" table="WM_POOL_TO_TRIGGER">
+         <collection element-type="MWMTrigger" />
+         <join>
+           <column name="POOL_ID" />
+         </join>
+         <element>
+           <column name="TRIGGER_ID"/>
+         </element>
+       </field>
+       <index name="UniqueWMPool" unique="true">
+         <column name="RP_ID"/>
+         <column name="PATH"/>
+       </index>
+     </class>
+ 
+     <class name="MWMTrigger" identity-type="datastore" table="WM_TRIGGER" detachable="true">
+       <datastore-identity>
+         <column name="TRIGGER_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="triggerExpression">
+         <column name="TRIGGER_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="actionExpression">
+         <column name="ACTION_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="isInUnmanaged">
+         <column name="IS_IN_UNMANAGED" allows-null="false"/>
+       </field>
+       <field name="pools" mapped-by="triggers">
+         <collection element-type="MWMPool" />
+       </field>
+       <index name="UniqueWMTrigger" unique="true">
+         <column name="RP_ID"/>
+         <column name="NAME"/>
+       </index>
+     </class>
+ 
+     <class name="MWMMapping" identity-type="datastore" table="WM_MAPPING" detachable="true">
+       <datastore-identity>
+         <column name="MAPPING_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="entityType">
+         <column name="ENTITY_TYPE" jdbc-type="string" length="128" />
+       </field>
+       <field name="entityName">
+         <column name="ENTITY_NAME" jdbc-type="string" length="128" />
+       </field>
+       <field name="pool">
+         <column name="POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="ordering">
+         <column name="ORDERING" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueWMMapping" unique="true">
+         <column name="RP_ID"/>
+         <column name="ENTITY_TYPE"/>
+         <column name="ENTITY_NAME"/>
+       </index>
+     </class>
+ 
+     <class name="MISchema" identity-type="datastore" table="I_SCHEMA" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_ID"/>
+       </datastore-identity>
+       <field name="schemaType">
+         <column name="SCHEMA_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="name">
+         <column name="NAME" jdbc-type="varchar" length="256"/>
+       </field>
+       <field name="db">
+         <column name="DB_ID"/>
+       </field>
+       <field name="compatibility">
+         <column name="COMPATIBILITY" jdbc-type="integer"/>
+       </field>
+       <field name="validationLevel">
+         <column name="VALIDATION_LEVEL" jdbc-type="integer"/>
+       </field>
+       <field name="canEvolve">
+         <column name="CAN_EVOLVE"/>
+       </field>
+       <field name="schemaGroup">
+         <column name="SCHEMA_GROUP" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MSchemaVersion" identity-type="datastore" table="SCHEMA_VERSION" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_VERSION_ID"/>
+       </datastore-identity>
+       <field name="iSchema">
+         <column name="SCHEMA_ID"/>
+       </field>
+       <field name="version">
+         <column name="VERSION" jdbc-type="integer"/>
+       </field>
+       <field name="createdAt">
+         <column name="CREATED_AT" jdbc-type="bigint"/>
+       </field>
+       <field name="cols">
+           <column name="CD_ID"/>
+       </field>
+       <field name="state">
+         <column name="STATE" jdbc-type="integer"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+       <field name="schemaText" default-fetch-group="false">
+         <column name="SCHEMA_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="fingerprint">
+         <column name="FINGERPRINT" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="name">
+         <column name="SCHEMA_VERSION_NAME" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="serDe">
+         <column name="SERDE_ID"/>
+       </field>
+     </class>
+     <class name="MRuntimeStat" identity-type="datastore" table="RUNTIME_STATS" detachable="true">
+       <datastore-identity>
+         <column name="RS_ID"/>
+       </datastore-identity>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="weight">
+         <column name="WEIGHT" jdbc-type="integer"/>
+       </field>
+       <field name="payload">
+         <column name="PAYLOAD" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+    </class>
+   </package>
+ </jdo>
+ 
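The lines marked "++" are the only branch-side additions in this file: a writeId field on MTable and on MPartition, backed by WRITE_ID columns in TBLS and PARTITIONS (the line count in the hunk header moves from 1420 to 1426 accordingly). As a rough orientation only, the sketch below shows how such a mapped field typically surfaces on a JDO model class, assuming standard JavaBean accessors and a long-valued write id; the class and member names are illustrative, not copied from the actual Hive model sources.

    // Illustrative sketch only -- the field and accessor shape is assumed from the
    // <field name="writeId"><column name="WRITE_ID"/></field> mapping above, not
    // taken from the real MTable/MPartition classes.
    public class MTableSketch {
      private long writeId;   // persisted to TBLS.WRITE_ID by the JDO implementation

      public long getWriteId() { return writeId; }
      public void setWriteId(long writeId) { this.writeId = writeId; }
    }

With metadata-driven JDO mapping like package.jdo, the persistence layer matches <field name="writeId"> to the Java field by name, so the model class itself needs no column annotations.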


[30/50] [abbrv] hive git commit: HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 0aab253..f3e0ba4 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@ -863,7 +863,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
     createTable4PartColsParts(client);
     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
     client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
-        -1, null, -1);
+        null, -1);
   }
 
   @Test(expected = InvalidOperationException.class)
@@ -1080,7 +1080,7 @@ public class TestAlterPartitions extends MetaStoreClientTest {
 
     Partition partToRename = oldParts.get(3);
     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
-    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename, -1, null);
+    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename, null);
   }
 
   @Test(expected = InvalidOperationException.class)
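The two hunks above show the client-facing effect of HIVE-20047 on these tests: the leading txnId argument (the "-1" dropped from each call) is removed, leaving only the validWriteIdList and, for alter_partitions, the table-level write id. A minimal sketch of the resulting call shape, assuming the same fixtures as the test (client, DB_NAME, TABLE_NAME, part, oldValues, partToRename) and a placeholder catalog name instead of the test's intentionally invalid "nosuch"; parameter meanings are inferred from the surrounding diff, not quoted from the IMetaStoreClient javadoc.

    // Sketch of the post-HIVE-20047 call shape as exercised by this test.
    List<Partition> parts = Lists.newArrayList(part);

    // was: alter_partitions(cat, db, tbl, parts, ctx, txnId, validWriteIdList, writeId)
    client.alter_partitions("hive", DB_NAME, TABLE_NAME, parts,
        new EnvironmentContext(), /* validWriteIdList */ null, /* writeId */ -1);

    // was: renamePartition(cat, db, tbl, oldValues, newPart, txnId, validWriteIdList)
    client.renamePartition("hive", DB_NAME, TABLE_NAME, oldValues.get(3),
        partToRename, /* validWriteIdList */ null);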


[33/50] [abbrv] hive git commit: HIVE-20047 : remove txnID argument for txn stats methods (Sergey Shelukhin)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index b2d85f7..7e08420 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -43,8 +43,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
   private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -57,7 +56,6 @@ import org.slf4j.LoggerFactory;
   private List<String> colNames; // required
   private List<String> partNames; // required
   private String catName; // optional
-  private long txnId; // optional
   private String validWriteIdList; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -67,8 +65,7 @@ import org.slf4j.LoggerFactory;
     COL_NAMES((short)3, "colNames"),
     PART_NAMES((short)4, "partNames"),
     CAT_NAME((short)5, "catName"),
-    TXN_ID((short)6, "txnId"),
-    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
+    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -93,9 +90,7 @@ import org.slf4j.LoggerFactory;
           return PART_NAMES;
         case 5: // CAT_NAME
           return CAT_NAME;
-        case 6: // TXN_ID
-          return TXN_ID;
-        case 7: // VALID_WRITE_ID_LIST
+        case 6: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -137,9 +132,7 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -155,8 +148,6 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -164,8 +155,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public PartitionsStatsRequest() {
-    this.txnId = -1L;
-
   }
 
   public PartitionsStatsRequest(
@@ -185,7 +174,6 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public PartitionsStatsRequest(PartitionsStatsRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetDbName()) {
       this.dbName = other.dbName;
     }
@@ -203,7 +191,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
-    this.txnId = other.txnId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
@@ -220,8 +207,6 @@ import org.slf4j.LoggerFactory;
     this.colNames = null;
     this.partNames = null;
     this.catName = null;
-    this.txnId = -1L;
-
     this.validWriteIdList = null;
   }
 
@@ -370,28 +355,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public String getValidWriteIdList() {
     return this.validWriteIdList;
   }
@@ -457,14 +420,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case VALID_WRITE_ID_LIST:
       if (value == null) {
         unsetValidWriteIdList();
@@ -493,9 +448,6 @@ import org.slf4j.LoggerFactory;
     case CAT_NAME:
       return getCatName();
 
-    case TXN_ID:
-      return getTxnId();
-
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
@@ -520,8 +472,6 @@ import org.slf4j.LoggerFactory;
       return isSetPartNames();
     case CAT_NAME:
       return isSetCatName();
-    case TXN_ID:
-      return isSetTxnId();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
     }
@@ -586,15 +536,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
     boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
     if (this_present_validWriteIdList || that_present_validWriteIdList) {
@@ -636,11 +577,6 @@ import org.slf4j.LoggerFactory;
     if (present_catName)
       list.add(catName);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_validWriteIdList = true && (isSetValidWriteIdList());
     list.add(present_validWriteIdList);
     if (present_validWriteIdList)
@@ -707,16 +643,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
     if (lastComparison != 0) {
       return lastComparison;
@@ -788,12 +714,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetValidWriteIdList()) {
       if (!first) sb.append(", ");
       sb.append("validWriteIdList:");
@@ -839,8 +759,6 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -925,15 +843,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 6: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 7: // VALID_WRITE_ID_LIST
+          case 6: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -995,11 +905,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.validWriteIdList != null) {
         if (struct.isSetValidWriteIdList()) {
           oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
@@ -1044,19 +949,13 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCatName()) {
         optionals.set(0);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(1);
-      }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(2);
+        optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 3);
+      oprot.writeBitSet(optionals, 2);
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
@@ -1091,16 +990,12 @@ import org.slf4j.LoggerFactory;
         }
       }
       struct.setPartNamesIsSet(true);
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         struct.catName = iprot.readString();
         struct.setCatNameIsSet(true);
       }
       if (incoming.get(1)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(2)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
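
For context, a minimal client-side sketch of what building a PartitionsStatsRequest looks like once txnId is removed. The class name, field names, and field ids come from the generated code above; the database, table, column, partition, and write-id-list values below are purely illustrative, and the standard Thrift bean setters (setDbName, setColNames, setValidWriteIdList, ...) are assumed rather than shown in this hunk.

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

    public class PartitionsStatsRequestExample {
      public static void main(String[] args) {
        // Build the request with the no-arg constructor and the generated setters.
        PartitionsStatsRequest req = new PartitionsStatsRequest();
        req.setDbName("default");                        // illustrative database name
        req.setTblName("web_logs");                      // illustrative table name
        req.setColNames(Arrays.asList("ip", "bytes"));   // illustrative column names
        req.setPartNames(Arrays.asList("ds=2018-07-25"));

        // After this patch the request no longer carries txnId; the reader's
        // snapshot is described entirely by the optional validWriteIdList
        // string, which slides down to field id 6.
        req.setValidWriteIdList("default.web_logs:5:5::");  // illustrative snapshot string

        System.out.println(req);
      }
    }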

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
index e4ffe11..3540e99 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RenamePartitionRequest.java
@@ -43,8 +43,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
   private static final org.apache.thrift.protocol.TField PART_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partVals", org.apache.thrift.protocol.TType.LIST, (short)4);
   private static final org.apache.thrift.protocol.TField NEW_PART_FIELD_DESC = new org.apache.thrift.protocol.TField("newPart", org.apache.thrift.protocol.TType.STRUCT, (short)5);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -57,7 +56,6 @@ import org.slf4j.LoggerFactory;
   private String tableName; // required
   private List<String> partVals; // required
   private Partition newPart; // required
-  private long txnId; // optional
   private String validWriteIdList; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -67,8 +65,7 @@ import org.slf4j.LoggerFactory;
     TABLE_NAME((short)3, "tableName"),
     PART_VALS((short)4, "partVals"),
     NEW_PART((short)5, "newPart"),
-    TXN_ID((short)6, "txnId"),
-    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
+    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -93,9 +90,7 @@ import org.slf4j.LoggerFactory;
           return PART_VALS;
         case 5: // NEW_PART
           return NEW_PART;
-        case 6: // TXN_ID
-          return TXN_ID;
-        case 7: // VALID_WRITE_ID_LIST
+        case 6: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -137,9 +132,7 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -154,8 +147,6 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
     tmpMap.put(_Fields.NEW_PART, new org.apache.thrift.meta_data.FieldMetaData("newPart", org.apache.thrift.TFieldRequirementType.REQUIRED, 
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -163,8 +154,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public RenamePartitionRequest() {
-    this.txnId = -1L;
-
   }
 
   public RenamePartitionRequest(
@@ -184,7 +173,6 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public RenamePartitionRequest(RenamePartitionRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
@@ -201,7 +189,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetNewPart()) {
       this.newPart = new Partition(other.newPart);
     }
-    this.txnId = other.txnId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
@@ -218,8 +205,6 @@ import org.slf4j.LoggerFactory;
     this.tableName = null;
     this.partVals = null;
     this.newPart = null;
-    this.txnId = -1L;
-
     this.validWriteIdList = null;
   }
 
@@ -353,28 +338,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public String getValidWriteIdList() {
     return this.validWriteIdList;
   }
@@ -440,14 +403,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case VALID_WRITE_ID_LIST:
       if (value == null) {
         unsetValidWriteIdList();
@@ -476,9 +431,6 @@ import org.slf4j.LoggerFactory;
     case NEW_PART:
       return getNewPart();
 
-    case TXN_ID:
-      return getTxnId();
-
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
@@ -503,8 +455,6 @@ import org.slf4j.LoggerFactory;
       return isSetPartVals();
     case NEW_PART:
       return isSetNewPart();
-    case TXN_ID:
-      return isSetTxnId();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
     }
@@ -569,15 +519,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
     boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
     if (this_present_validWriteIdList || that_present_validWriteIdList) {
@@ -619,11 +560,6 @@ import org.slf4j.LoggerFactory;
     if (present_newPart)
       list.add(newPart);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_validWriteIdList = true && (isSetValidWriteIdList());
     list.add(present_validWriteIdList);
     if (present_validWriteIdList)
@@ -690,16 +626,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
     if (lastComparison != 0) {
       return lastComparison;
@@ -771,12 +697,6 @@ import org.slf4j.LoggerFactory;
       sb.append(this.newPart);
     }
     first = false;
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetValidWriteIdList()) {
       if (!first) sb.append(", ");
       sb.append("validWriteIdList:");
@@ -825,8 +745,6 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -902,15 +820,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 6: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 7: // VALID_WRITE_ID_LIST
+          case 6: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -965,11 +875,6 @@ import org.slf4j.LoggerFactory;
         struct.newPart.write(oprot);
         oprot.writeFieldEnd();
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.validWriteIdList != null) {
         if (struct.isSetValidWriteIdList()) {
           oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
@@ -1008,19 +913,13 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCatName()) {
         optionals.set(0);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(1);
-      }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(2);
+        optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 3);
+      oprot.writeBitSet(optionals, 2);
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
@@ -1047,16 +946,12 @@ import org.slf4j.LoggerFactory;
       struct.newPart = new Partition();
       struct.newPart.read(iprot);
       struct.setNewPartIsSet(true);
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         struct.catName = iprot.readString();
         struct.setCatNameIsSet(true);
       }
       if (incoming.get(1)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(2)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
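
A similar hedged sketch for RenamePartitionRequest, which differs in shape (partition key values plus a replacement Partition struct). Names and values are illustrative; a real caller would pass a fully populated Partition rather than the empty placeholder used here.

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.hadoop.hive.metastore.api.RenamePartitionRequest;

    public class RenamePartitionRequestExample {
      public static void main(String[] args) {
        // newPart would normally be an existing Partition fetched from the
        // metastore and edited; an empty one keeps the sketch short.
        Partition newPart = new Partition();

        RenamePartitionRequest req = new RenamePartitionRequest();
        req.setDbName("default");                       // illustrative names
        req.setTableName("web_logs");
        req.setPartVals(Arrays.asList("2018-07-25"));   // old partition key values
        req.setNewPart(newPart);

        // txnId (old field 6) is dropped; validWriteIdList moves up to field 6.
        req.setValidWriteIdList("default.web_logs:9:9::");  // illustrative snapshot string

        System.out.println(req);
      }
    }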

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index 0b38840..6f9053d 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@ -40,9 +40,8 @@ import org.slf4j.LoggerFactory;
 
   private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
   private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -52,7 +51,6 @@ import org.slf4j.LoggerFactory;
 
   private List<ColumnStatistics> colStats; // required
   private boolean needMerge; // optional
-  private long txnId; // optional
   private long writeId; // optional
   private String validWriteIdList; // optional
 
@@ -60,9 +58,8 @@ import org.slf4j.LoggerFactory;
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     COL_STATS((short)1, "colStats"),
     NEED_MERGE((short)2, "needMerge"),
-    TXN_ID((short)3, "txnId"),
-    WRITE_ID((short)4, "writeId"),
-    VALID_WRITE_ID_LIST((short)5, "validWriteIdList");
+    WRITE_ID((short)3, "writeId"),
+    VALID_WRITE_ID_LIST((short)4, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -81,11 +78,9 @@ import org.slf4j.LoggerFactory;
           return COL_STATS;
         case 2: // NEED_MERGE
           return NEED_MERGE;
-        case 3: // TXN_ID
-          return TXN_ID;
-        case 4: // WRITE_ID
+        case 3: // WRITE_ID
           return WRITE_ID;
-        case 5: // VALID_WRITE_ID_LIST
+        case 4: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -128,10 +123,9 @@ import org.slf4j.LoggerFactory;
 
   // isset id assignments
   private static final int __NEEDMERGE_ISSET_ID = 0;
-  private static final int __TXNID_ISSET_ID = 1;
-  private static final int __WRITEID_ISSET_ID = 2;
+  private static final int __WRITEID_ISSET_ID = 1;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -140,8 +134,6 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))));
     tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -151,8 +143,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public SetPartitionsStatsRequest() {
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
   }
@@ -177,7 +167,6 @@ import org.slf4j.LoggerFactory;
       this.colStats = __this__colStats;
     }
     this.needMerge = other.needMerge;
-    this.txnId = other.txnId;
     this.writeId = other.writeId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
@@ -193,8 +182,6 @@ import org.slf4j.LoggerFactory;
     this.colStats = null;
     setNeedMergeIsSet(false);
     this.needMerge = false;
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
     this.validWriteIdList = null;
@@ -260,28 +247,6 @@ import org.slf4j.LoggerFactory;
     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value);
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public long getWriteId() {
     return this.writeId;
   }
@@ -345,14 +310,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case WRITE_ID:
       if (value == null) {
         unsetWriteId();
@@ -380,9 +337,6 @@ import org.slf4j.LoggerFactory;
     case NEED_MERGE:
       return isNeedMerge();
 
-    case TXN_ID:
-      return getTxnId();
-
     case WRITE_ID:
       return getWriteId();
 
@@ -404,8 +358,6 @@ import org.slf4j.LoggerFactory;
       return isSetColStats();
     case NEED_MERGE:
       return isSetNeedMerge();
-    case TXN_ID:
-      return isSetTxnId();
     case WRITE_ID:
       return isSetWriteId();
     case VALID_WRITE_ID_LIST:
@@ -445,15 +397,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_writeId = true && this.isSetWriteId();
     boolean that_present_writeId = true && that.isSetWriteId();
     if (this_present_writeId || that_present_writeId) {
@@ -489,11 +432,6 @@ import org.slf4j.LoggerFactory;
     if (present_needMerge)
       list.add(needMerge);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_writeId = true && (isSetWriteId());
     list.add(present_writeId);
     if (present_writeId)
@@ -535,16 +473,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
     if (lastComparison != 0) {
       return lastComparison;
@@ -598,12 +526,6 @@ import org.slf4j.LoggerFactory;
       sb.append(this.needMerge);
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetWriteId()) {
       if (!first) sb.append(", ");
       sb.append("writeId:");
@@ -696,15 +618,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 3: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 4: // WRITE_ID
+          case 3: // WRITE_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.writeId = iprot.readI64();
               struct.setWriteIdIsSet(true);
@@ -712,7 +626,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 5: // VALID_WRITE_ID_LIST
+          case 4: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -750,11 +664,6 @@ import org.slf4j.LoggerFactory;
         oprot.writeBool(struct.needMerge);
         oprot.writeFieldEnd();
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.isSetWriteId()) {
         oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
         oprot.writeI64(struct.writeId);
@@ -795,22 +704,16 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetNeedMerge()) {
         optionals.set(0);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(1);
-      }
       if (struct.isSetWriteId()) {
-        optionals.set(2);
+        optionals.set(1);
       }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(3);
+        optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 4);
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetNeedMerge()) {
         oprot.writeBool(struct.needMerge);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetWriteId()) {
         oprot.writeI64(struct.writeId);
       }
@@ -834,20 +737,16 @@ import org.slf4j.LoggerFactory;
         }
       }
       struct.setColStatsIsSet(true);
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         struct.needMerge = iprot.readBool();
         struct.setNeedMergeIsSet(true);
       }
       if (incoming.get(1)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(2)) {
         struct.writeId = iprot.readI64();
         struct.setWriteIdIsSet(true);
       }
-      if (incoming.get(3)) {
+      if (incoming.get(2)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
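
For SetPartitionsStatsRequest the same removal also shifts the TupleScheme optional bitset (3 bits instead of 4) and renumbers writeId and validWriteIdList to field ids 3 and 4. A minimal usage sketch under the same assumptions as above; the colStats list is left empty here only to keep the example short, and the write id and snapshot values are made up.

    import java.util.ArrayList;

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

    public class SetPartitionsStatsRequestExample {
      public static void main(String[] args) {
        SetPartitionsStatsRequest req = new SetPartitionsStatsRequest();

        // colStats is still the only required field; a real caller would add
        // populated ColumnStatistics objects here.
        req.setColStats(new ArrayList<ColumnStatistics>());
        req.setNeedMerge(false);

        // With txnId gone, the writer is identified by writeId alone, plus the
        // optional validWriteIdList snapshot.
        req.setWriteId(7L);                                   // illustrative write id
        req.setValidWriteIdList("default.web_logs:7:7::");    // illustrative snapshot string

        System.out.println(req);
      }
    }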

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index 55f2724..ae3a6e2 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@ -42,8 +42,7 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -55,7 +54,6 @@ import org.slf4j.LoggerFactory;
   private String tblName; // required
   private List<String> colNames; // required
   private String catName; // optional
-  private long txnId; // optional
   private String validWriteIdList; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
@@ -64,8 +62,7 @@ import org.slf4j.LoggerFactory;
     TBL_NAME((short)2, "tblName"),
     COL_NAMES((short)3, "colNames"),
     CAT_NAME((short)4, "catName"),
-    TXN_ID((short)5, "txnId"),
-    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+    VALID_WRITE_ID_LIST((short)5, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -88,9 +85,7 @@ import org.slf4j.LoggerFactory;
           return COL_NAMES;
         case 4: // CAT_NAME
           return CAT_NAME;
-        case 5: // TXN_ID
-          return TXN_ID;
-        case 6: // VALID_WRITE_ID_LIST
+        case 5: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -132,9 +127,7 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -147,8 +140,6 @@ import org.slf4j.LoggerFactory;
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
     metaDataMap = Collections.unmodifiableMap(tmpMap);
@@ -156,8 +147,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public TableStatsRequest() {
-    this.txnId = -1L;
-
   }
 
   public TableStatsRequest(
@@ -175,7 +164,6 @@ import org.slf4j.LoggerFactory;
    * Performs a deep copy on <i>other</i>.
    */
   public TableStatsRequest(TableStatsRequest other) {
-    __isset_bitfield = other.__isset_bitfield;
     if (other.isSetDbName()) {
       this.dbName = other.dbName;
     }
@@ -189,7 +177,6 @@ import org.slf4j.LoggerFactory;
     if (other.isSetCatName()) {
       this.catName = other.catName;
     }
-    this.txnId = other.txnId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
     }
@@ -205,8 +192,6 @@ import org.slf4j.LoggerFactory;
     this.tblName = null;
     this.colNames = null;
     this.catName = null;
-    this.txnId = -1L;
-
     this.validWriteIdList = null;
   }
 
@@ -317,28 +302,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public String getValidWriteIdList() {
     return this.validWriteIdList;
   }
@@ -396,14 +359,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case VALID_WRITE_ID_LIST:
       if (value == null) {
         unsetValidWriteIdList();
@@ -429,9 +384,6 @@ import org.slf4j.LoggerFactory;
     case CAT_NAME:
       return getCatName();
 
-    case TXN_ID:
-      return getTxnId();
-
     case VALID_WRITE_ID_LIST:
       return getValidWriteIdList();
 
@@ -454,8 +406,6 @@ import org.slf4j.LoggerFactory;
       return isSetColNames();
     case CAT_NAME:
       return isSetCatName();
-    case TXN_ID:
-      return isSetTxnId();
     case VALID_WRITE_ID_LIST:
       return isSetValidWriteIdList();
     }
@@ -511,15 +461,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
     boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
     if (this_present_validWriteIdList || that_present_validWriteIdList) {
@@ -556,11 +497,6 @@ import org.slf4j.LoggerFactory;
     if (present_catName)
       list.add(catName);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_validWriteIdList = true && (isSetValidWriteIdList());
     list.add(present_validWriteIdList);
     if (present_validWriteIdList)
@@ -617,16 +553,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
     if (lastComparison != 0) {
       return lastComparison;
@@ -690,12 +616,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetValidWriteIdList()) {
       if (!first) sb.append(", ");
       sb.append("validWriteIdList:");
@@ -737,8 +657,6 @@ import org.slf4j.LoggerFactory;
 
   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
     try {
-      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
-      __isset_bitfield = 0;
       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
     } catch (org.apache.thrift.TException te) {
       throw new java.io.IOException(te);
@@ -805,15 +723,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 5: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 6: // VALID_WRITE_ID_LIST
+          case 5: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -863,11 +773,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.validWriteIdList != null) {
         if (struct.isSetValidWriteIdList()) {
           oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
@@ -905,19 +810,13 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetCatName()) {
         optionals.set(0);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(1);
-      }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(2);
+        optionals.set(1);
       }
-      oprot.writeBitSet(optionals, 3);
+      oprot.writeBitSet(optionals, 2);
       if (struct.isSetCatName()) {
         oprot.writeString(struct.catName);
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetValidWriteIdList()) {
         oprot.writeString(struct.validWriteIdList);
       }
@@ -941,16 +840,12 @@ import org.slf4j.LoggerFactory;
         }
       }
       struct.setColNamesIsSet(true);
-      BitSet incoming = iprot.readBitSet(3);
+      BitSet incoming = iprot.readBitSet(2);
       if (incoming.get(0)) {
         struct.catName = iprot.readString();
         struct.setCatNameIsSet(true);
       }
       if (incoming.get(1)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(2)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TruncateTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TruncateTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TruncateTableRequest.java
index 9fb79a3..39cc0cf 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TruncateTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TruncateTableRequest.java
@@ -41,9 +41,8 @@ import org.slf4j.LoggerFactory;
   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
   private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)3);
-  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)4);
-  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)5);
-  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -54,7 +53,6 @@ import org.slf4j.LoggerFactory;
   private String dbName; // required
   private String tableName; // required
   private List<String> partNames; // optional
-  private long txnId; // optional
   private long writeId; // optional
   private String validWriteIdList; // optional
 
@@ -63,9 +61,8 @@ import org.slf4j.LoggerFactory;
     DB_NAME((short)1, "dbName"),
     TABLE_NAME((short)2, "tableName"),
     PART_NAMES((short)3, "partNames"),
-    TXN_ID((short)4, "txnId"),
-    WRITE_ID((short)5, "writeId"),
-    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+    WRITE_ID((short)4, "writeId"),
+    VALID_WRITE_ID_LIST((short)5, "validWriteIdList");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -86,11 +83,9 @@ import org.slf4j.LoggerFactory;
           return TABLE_NAME;
         case 3: // PART_NAMES
           return PART_NAMES;
-        case 4: // TXN_ID
-          return TXN_ID;
-        case 5: // WRITE_ID
+        case 4: // WRITE_ID
           return WRITE_ID;
-        case 6: // VALID_WRITE_ID_LIST
+        case 5: // VALID_WRITE_ID_LIST
           return VALID_WRITE_ID_LIST;
         default:
           return null;
@@ -132,10 +127,9 @@ import org.slf4j.LoggerFactory;
   }
 
   // isset id assignments
-  private static final int __TXNID_ISSET_ID = 0;
-  private static final int __WRITEID_ISSET_ID = 1;
+  private static final int __WRITEID_ISSET_ID = 0;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PART_NAMES,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+  private static final _Fields optionals[] = {_Fields.PART_NAMES,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -146,8 +140,6 @@ import org.slf4j.LoggerFactory;
     tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
-    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
-        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
     tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
@@ -157,8 +149,6 @@ import org.slf4j.LoggerFactory;
   }
 
   public TruncateTableRequest() {
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
   }
@@ -187,7 +177,6 @@ import org.slf4j.LoggerFactory;
       List<String> __this__partNames = new ArrayList<String>(other.partNames);
       this.partNames = __this__partNames;
     }
-    this.txnId = other.txnId;
     this.writeId = other.writeId;
     if (other.isSetValidWriteIdList()) {
       this.validWriteIdList = other.validWriteIdList;
@@ -203,8 +192,6 @@ import org.slf4j.LoggerFactory;
     this.dbName = null;
     this.tableName = null;
     this.partNames = null;
-    this.txnId = -1L;
-
     this.writeId = -1L;
 
     this.validWriteIdList = null;
@@ -294,28 +281,6 @@ import org.slf4j.LoggerFactory;
     }
   }
 
-  public long getTxnId() {
-    return this.txnId;
-  }
-
-  public void setTxnId(long txnId) {
-    this.txnId = txnId;
-    setTxnIdIsSet(true);
-  }
-
-  public void unsetTxnId() {
-    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
-  public boolean isSetTxnId() {
-    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
-  }
-
-  public void setTxnIdIsSet(boolean value) {
-    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
-  }
-
   public long getWriteId() {
     return this.writeId;
   }
@@ -387,14 +352,6 @@ import org.slf4j.LoggerFactory;
       }
       break;
 
-    case TXN_ID:
-      if (value == null) {
-        unsetTxnId();
-      } else {
-        setTxnId((Long)value);
-      }
-      break;
-
     case WRITE_ID:
       if (value == null) {
         unsetWriteId();
@@ -425,9 +382,6 @@ import org.slf4j.LoggerFactory;
     case PART_NAMES:
       return getPartNames();
 
-    case TXN_ID:
-      return getTxnId();
-
     case WRITE_ID:
       return getWriteId();
 
@@ -451,8 +405,6 @@ import org.slf4j.LoggerFactory;
       return isSetTableName();
     case PART_NAMES:
       return isSetPartNames();
-    case TXN_ID:
-      return isSetTxnId();
     case WRITE_ID:
       return isSetWriteId();
     case VALID_WRITE_ID_LIST:
@@ -501,15 +453,6 @@ import org.slf4j.LoggerFactory;
         return false;
     }
 
-    boolean this_present_txnId = true && this.isSetTxnId();
-    boolean that_present_txnId = true && that.isSetTxnId();
-    if (this_present_txnId || that_present_txnId) {
-      if (!(this_present_txnId && that_present_txnId))
-        return false;
-      if (this.txnId != that.txnId)
-        return false;
-    }
-
     boolean this_present_writeId = true && this.isSetWriteId();
     boolean that_present_writeId = true && that.isSetWriteId();
     if (this_present_writeId || that_present_writeId) {
@@ -550,11 +493,6 @@ import org.slf4j.LoggerFactory;
     if (present_partNames)
       list.add(partNames);
 
-    boolean present_txnId = true && (isSetTxnId());
-    list.add(present_txnId);
-    if (present_txnId)
-      list.add(txnId);
-
     boolean present_writeId = true && (isSetWriteId());
     list.add(present_writeId);
     if (present_writeId)
@@ -606,16 +544,6 @@ import org.slf4j.LoggerFactory;
         return lastComparison;
       }
     }
-    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
-    if (lastComparison != 0) {
-      return lastComparison;
-    }
-    if (isSetTxnId()) {
-      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-    }
     lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
     if (lastComparison != 0) {
       return lastComparison;
@@ -681,12 +609,6 @@ import org.slf4j.LoggerFactory;
       }
       first = false;
     }
-    if (isSetTxnId()) {
-      if (!first) sb.append(", ");
-      sb.append("txnId:");
-      sb.append(this.txnId);
-      first = false;
-    }
     if (isSetWriteId()) {
       if (!first) sb.append(", ");
       sb.append("writeId:");
@@ -790,15 +712,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 4: // TXN_ID
-            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
-              struct.txnId = iprot.readI64();
-              struct.setTxnIdIsSet(true);
-            } else { 
-              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-            }
-            break;
-          case 5: // WRITE_ID
+          case 4: // WRITE_ID
             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
               struct.writeId = iprot.readI64();
               struct.setWriteIdIsSet(true);
@@ -806,7 +720,7 @@ import org.slf4j.LoggerFactory;
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
             }
             break;
-          case 6: // VALID_WRITE_ID_LIST
+          case 5: // VALID_WRITE_ID_LIST
             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
               struct.validWriteIdList = iprot.readString();
               struct.setValidWriteIdListIsSet(true);
@@ -851,11 +765,6 @@ import org.slf4j.LoggerFactory;
           oprot.writeFieldEnd();
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
-        oprot.writeI64(struct.txnId);
-        oprot.writeFieldEnd();
-      }
       if (struct.isSetWriteId()) {
         oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
         oprot.writeI64(struct.writeId);
@@ -891,16 +800,13 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetPartNames()) {
         optionals.set(0);
       }
-      if (struct.isSetTxnId()) {
-        optionals.set(1);
-      }
       if (struct.isSetWriteId()) {
-        optionals.set(2);
+        optionals.set(1);
       }
       if (struct.isSetValidWriteIdList()) {
-        optionals.set(3);
+        optionals.set(2);
       }
-      oprot.writeBitSet(optionals, 4);
+      oprot.writeBitSet(optionals, 3);
       if (struct.isSetPartNames()) {
         {
           oprot.writeI32(struct.partNames.size());
@@ -910,9 +816,6 @@ import org.slf4j.LoggerFactory;
           }
         }
       }
-      if (struct.isSetTxnId()) {
-        oprot.writeI64(struct.txnId);
-      }
       if (struct.isSetWriteId()) {
         oprot.writeI64(struct.writeId);
       }
@@ -928,7 +831,7 @@ import org.slf4j.LoggerFactory;
       struct.setDbNameIsSet(true);
       struct.tableName = iprot.readString();
       struct.setTableNameIsSet(true);
-      BitSet incoming = iprot.readBitSet(4);
+      BitSet incoming = iprot.readBitSet(3);
       if (incoming.get(0)) {
         {
           org.apache.thrift.protocol.TList _list83 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
@@ -943,14 +846,10 @@ import org.slf4j.LoggerFactory;
         struct.setPartNamesIsSet(true);
       }
       if (incoming.get(1)) {
-        struct.txnId = iprot.readI64();
-        struct.setTxnIdIsSet(true);
-      }
-      if (incoming.get(2)) {
         struct.writeId = iprot.readI64();
         struct.setWriteIdIsSet(true);
       }
-      if (incoming.get(3)) {
+      if (incoming.get(2)) {
         struct.validWriteIdList = iprot.readString();
         struct.setValidWriteIdListIsSet(true);
       }
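
For reference, a minimal Java sketch of how a caller might populate the generated TruncateTableRequest after this change, assuming the Thrift-generated constructor over the required dbName/tableName fields and the setters visible in the hunk above; the partition name and write-id values are illustrative placeholders, not taken from the patch:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TruncateTableRequest;

    public class TruncateRequestSketch {
      public static TruncateTableRequest build() {
        // Required dbName/tableName go through the generated constructor; the
        // remaining optional fields go through their generated setters.
        TruncateTableRequest req = new TruncateTableRequest("testdb", "testtable");
        req.setPartNames(Arrays.asList("yyyy=2017/mm=10/dd=26")); // optional field 3
        req.setWriteId(42L);                                      // optional field 4 (previously 5)
        req.setValidWriteIdList("testdb.testtable:42:42::");      // optional field 5 (previously 6); value illustrative
        return req;
      }
    }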

http://git-wip-us.apache.org/repos/asf/hive/blob/cdb32a7f/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index fe631b7..5ed4f71 100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@ -3497,10 +3497,6 @@ class TruncateTableRequest {
   /**
    * @var int
    */
-  public $txnId = -1;
-  /**
-   * @var int
-   */
   public $writeId = -1;
   /**
    * @var string
@@ -3527,14 +3523,10 @@ class TruncateTableRequest {
             ),
           ),
         4 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        5 => array(
           'var' => 'writeId',
           'type' => TType::I64,
           ),
-        6 => array(
+        5 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -3550,9 +3542,6 @@ class TruncateTableRequest {
       if (isset($vals['partNames'])) {
         $this->partNames = $vals['partNames'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['writeId'])) {
         $this->writeId = $vals['writeId'];
       }
@@ -3614,19 +3603,12 @@ class TruncateTableRequest {
           break;
         case 4:
           if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 5:
-          if ($ftype == TType::I64) {
             $xfer += $input->readI64($this->writeId);
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 6:
+        case 5:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -3673,18 +3655,13 @@ class TruncateTableRequest {
       }
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 4);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 5);
+      $xfer += $output->writeFieldBegin('writeId', TType::I64, 4);
       $xfer += $output->writeI64($this->writeId);
       $xfer += $output->writeFieldEnd();
     }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -10604,10 +10581,6 @@ class SetPartitionsStatsRequest {
   /**
    * @var int
    */
-  public $txnId = -1;
-  /**
-   * @var int
-   */
   public $writeId = -1;
   /**
    * @var string
@@ -10631,14 +10604,10 @@ class SetPartitionsStatsRequest {
           'type' => TType::BOOL,
           ),
         3 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        4 => array(
           'var' => 'writeId',
           'type' => TType::I64,
           ),
-        5 => array(
+        4 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -10651,9 +10620,6 @@ class SetPartitionsStatsRequest {
       if (isset($vals['needMerge'])) {
         $this->needMerge = $vals['needMerge'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['writeId'])) {
         $this->writeId = $vals['writeId'];
       }
@@ -10709,19 +10675,12 @@ class SetPartitionsStatsRequest {
           break;
         case 3:
           if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 4:
-          if ($ftype == TType::I64) {
             $xfer += $input->readI64($this->writeId);
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 5:
+        case 4:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -10763,18 +10722,13 @@ class SetPartitionsStatsRequest {
       $xfer += $output->writeBool($this->needMerge);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 3);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 4);
+      $xfer += $output->writeFieldBegin('writeId', TType::I64, 3);
       $xfer += $output->writeI64($this->writeId);
       $xfer += $output->writeFieldEnd();
     }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 4);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -13900,10 +13854,6 @@ class TableStatsRequest {
    */
   public $catName = null;
   /**
-   * @var int
-   */
-  public $txnId = -1;
-  /**
    * @var string
    */
   public $validWriteIdList = null;
@@ -13932,10 +13882,6 @@ class TableStatsRequest {
           'type' => TType::STRING,
           ),
         5 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        6 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -13954,9 +13900,6 @@ class TableStatsRequest {
       if (isset($vals['catName'])) {
         $this->catName = $vals['catName'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
@@ -14021,13 +13964,6 @@ class TableStatsRequest {
           }
           break;
         case 5:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 6:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -14079,13 +14015,8 @@ class TableStatsRequest {
       $xfer += $output->writeString($this->catName);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 5);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -14120,10 +14051,6 @@ class PartitionsStatsRequest {
    */
   public $catName = null;
   /**
-   * @var int
-   */
-  public $txnId = -1;
-  /**
    * @var string
    */
   public $validWriteIdList = null;
@@ -14160,10 +14087,6 @@ class PartitionsStatsRequest {
           'type' => TType::STRING,
           ),
         6 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        7 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -14185,9 +14108,6 @@ class PartitionsStatsRequest {
       if (isset($vals['catName'])) {
         $this->catName = $vals['catName'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
@@ -14269,13 +14189,6 @@ class PartitionsStatsRequest {
           }
           break;
         case 6:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 7:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -14344,13 +14257,8 @@ class PartitionsStatsRequest {
       $xfer += $output->writeString($this->catName);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -14515,10 +14423,6 @@ class AddPartitionsRequest {
    */
   public $catName = null;
   /**
-   * @var int
-   */
-  public $txnId = -1;
-  /**
    * @var string
    */
   public $validWriteIdList = null;
@@ -14556,10 +14460,6 @@ class AddPartitionsRequest {
           'type' => TType::STRING,
           ),
         7 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        8 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -14584,9 +14484,6 @@ class AddPartitionsRequest {
       if (isset($vals['catName'])) {
         $this->catName = $vals['catName'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
@@ -14666,13 +14563,6 @@ class AddPartitionsRequest {
           }
           break;
         case 7:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 8:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -14734,13 +14624,8 @@ class AddPartitionsRequest {
       $xfer += $output->writeString($this->catName);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 7);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -25005,10 +24890,6 @@ class GetTableRequest {
    */
   public $catName = null;
   /**
-   * @var int
-   */
-  public $txnId = -1;
-  /**
    * @var string
    */
   public $validWriteIdList = null;
@@ -25033,10 +24914,6 @@ class GetTableRequest {
           'var' => 'catName',
           'type' => TType::STRING,
           ),
-        5 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
         6 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
@@ -25056,9 +24933,6 @@ class GetTableRequest {
       if (isset($vals['catName'])) {
         $this->catName = $vals['catName'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
@@ -25113,13 +24987,6 @@ class GetTableRequest {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 5:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
         case 6:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
@@ -25163,11 +25030,6 @@ class GetTableRequest {
       $xfer += $output->writeString($this->catName);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 5);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->validWriteIdList !== null) {
       $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
       $xfer += $output->writeString($this->validWriteIdList);
@@ -31706,10 +31568,6 @@ class AlterPartitionsRequest {
   /**
    * @var int
    */
-  public $txnId = -1;
-  /**
-   * @var int
-   */
   public $writeId = -1;
   /**
    * @var string
@@ -31746,14 +31604,10 @@ class AlterPartitionsRequest {
           'class' => '\metastore\EnvironmentContext',
           ),
         6 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        7 => array(
           'var' => 'writeId',
           'type' => TType::I64,
           ),
-        8 => array(
+        7 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -31775,9 +31629,6 @@ class AlterPartitionsRequest {
       if (isset($vals['environmentContext'])) {
         $this->environmentContext = $vals['environmentContext'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['writeId'])) {
         $this->writeId = $vals['writeId'];
       }
@@ -31855,19 +31706,12 @@ class AlterPartitionsRequest {
           break;
         case 6:
           if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 7:
-          if ($ftype == TType::I64) {
             $xfer += $input->readI64($this->writeId);
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 8:
+        case 7:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -31927,18 +31771,13 @@ class AlterPartitionsRequest {
       $xfer += $this->environmentContext->write($output);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 7);
+      $xfer += $output->writeFieldBegin('writeId', TType::I64, 6);
       $xfer += $output->writeI64($this->writeId);
       $xfer += $output->writeFieldEnd();
     }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -32023,10 +31862,6 @@ class RenamePartitionRequest {
    */
   public $newPart = null;
   /**
-   * @var int
-   */
-  public $txnId = -1;
-  /**
    * @var string
    */
   public $validWriteIdList = null;
@@ -32060,10 +31895,6 @@ class RenamePartitionRequest {
           'class' => '\metastore\Partition',
           ),
         6 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        7 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -32085,9 +31916,6 @@ class RenamePartitionRequest {
       if (isset($vals['newPart'])) {
         $this->newPart = $vals['newPart'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['validWriteIdList'])) {
         $this->validWriteIdList = $vals['validWriteIdList'];
       }
@@ -32160,13 +31988,6 @@ class RenamePartitionRequest {
           }
           break;
         case 6:
-          if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 7:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -32226,13 +32047,8 @@ class RenamePartitionRequest {
       $xfer += $this->newPart->write($output);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 6);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
@@ -32319,10 +32135,6 @@ class AlterTableRequest {
   /**
    * @var int
    */
-  public $txnId = -1;
-  /**
-   * @var int
-   */
   public $writeId = -1;
   /**
    * @var string
@@ -32355,14 +32167,10 @@ class AlterTableRequest {
           'class' => '\metastore\EnvironmentContext',
           ),
         6 => array(
-          'var' => 'txnId',
-          'type' => TType::I64,
-          ),
-        7 => array(
           'var' => 'writeId',
           'type' => TType::I64,
           ),
-        8 => array(
+        7 => array(
           'var' => 'validWriteIdList',
           'type' => TType::STRING,
           ),
@@ -32384,9 +32192,6 @@ class AlterTableRequest {
       if (isset($vals['environmentContext'])) {
         $this->environmentContext = $vals['environmentContext'];
       }
-      if (isset($vals['txnId'])) {
-        $this->txnId = $vals['txnId'];
-      }
       if (isset($vals['writeId'])) {
         $this->writeId = $vals['writeId'];
       }
@@ -32454,19 +32259,12 @@ class AlterTableRequest {
           break;
         case 6:
           if ($ftype == TType::I64) {
-            $xfer += $input->readI64($this->txnId);
-          } else {
-            $xfer += $input->skip($ftype);
-          }
-          break;
-        case 7:
-          if ($ftype == TType::I64) {
             $xfer += $input->readI64($this->writeId);
           } else {
             $xfer += $input->skip($ftype);
           }
           break;
-        case 8:
+        case 7:
           if ($ftype == TType::STRING) {
             $xfer += $input->readString($this->validWriteIdList);
           } else {
@@ -32517,18 +32315,13 @@ class AlterTableRequest {
       $xfer += $this->environmentContext->write($output);
       $xfer += $output->writeFieldEnd();
     }
-    if ($this->txnId !== null) {
-      $xfer += $output->writeFieldBegin('txnId', TType::I64, 6);
-      $xfer += $output->writeI64($this->txnId);
-      $xfer += $output->writeFieldEnd();
-    }
     if ($this->writeId !== null) {
-      $xfer += $output->writeFieldBegin('writeId', TType::I64, 7);
+      $xfer += $output->writeFieldBegin('writeId', TType::I64, 6);
       $xfer += $output->writeI64($this->writeId);
       $xfer += $output->writeFieldEnd();
     }
     if ($this->validWriteIdList !== null) {
-      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 8);
+      $xfer += $output->writeFieldBegin('validWriteIdList', TType::STRING, 7);
       $xfer += $output->writeString($this->validWriteIdList);
       $xfer += $output->writeFieldEnd();
     }
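
A corresponding Java-side sketch for one of the renumbered request structs, TableStatsRequest, assuming the generated constructor over its required dbName/tblName/colNames fields; the column names, catalog name, and write-id list value are illustrative placeholders, not taken from the patch:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class TableStatsRequestSketch {
      public static TableStatsRequest build() {
        // Required fields via the generated constructor.
        TableStatsRequest rq = new TableStatsRequest("testdb", "testtable",
            Arrays.asList("id", "name"));
        rq.setCatName("hive");                             // optional field 4
        rq.setValidWriteIdList("testdb.testtable:5:5::");  // now optional field 5 (previously 6); value illustrative
        return rq;
      }
    }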


[43/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0722

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0722


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b17a3471
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b17a3471
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b17a3471

Branch: refs/heads/master
Commit: b17a3471c93216976a9224c2c827b72e45c9d37d
Parents: 2cbe133 92ecdd9
Author: sergey <se...@apache.org>
Authored: Sun Jul 22 21:02:43 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sun Jul 22 21:02:43 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hive/jdbc/TestJdbcDriver2.java   | 31 ++++++++++++++++++++
 .../apache/hive/jdbc/HivePreparedStatement.java |  3 +-
 .../org/apache/hive/jdbc/HiveStatement.java     |  3 +-
 3 files changed, 34 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
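
The JDBC classes touched above are exercised through the standard java.sql API; a minimal, self-contained sketch of that usage (the HiveServer2 endpoint, credentials, and query are placeholders, not taken from the patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class HiveJdbcSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder HiveServer2 endpoint; the Hive JDBC driver self-registers
        // through the JDBC 4 ServiceLoader mechanism, so no Class.forName is needed.
        String url = "jdbc:hive2://localhost:10000/default";
        try (Connection conn = DriverManager.getConnection(url, "anonymous", "");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT 1")) {
          while (rs.next()) {
            System.out.println(rs.getInt(1));
          }
        }
      }
    }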



[02/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 0000000,54bf3d7..0aab253
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@@ -1,0 -1,1117 +1,1154 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.client;
+ 
++import java.net.ProtocolException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ 
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TException;
++import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.transport.TTransportException;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import org.junit.After;
+ import org.junit.AfterClass;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import static java.util.stream.Collectors.joining;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotEquals;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ 
+ /**
+  * API tests for HMS client's  alterPartitions methods.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestAlterPartitions extends MetaStoreClientTest {
+   private static final int NEW_CREATE_TIME = 123456789;
+   private AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+ 
+   private static final String DB_NAME = "testpartdb";
+   private static final String TABLE_NAME = "testparttable";
+   private static final List<String> PARTCOL_SCHEMA = Lists.newArrayList("yyyy", "mm", "dd");
+ 
+   public TestAlterPartitions(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(DB_NAME, true, true, true);
+     metaStore.cleanWarehouseDirs();
+     createDB(DB_NAME);
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   private void createDB(String dbName) throws TException {
+     new DatabaseBuilder().
+             setName(dbName).
+             create(client, metaStore.getConf());
+   }
+ 
+   private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
+                                        List<String> partCols, boolean setPartitionLevelPrivilages)
+           throws Exception {
+     TableBuilder builder = new TableBuilder()
+             .setDbName(dbName)
+             .setTableName(tableName)
+             .addCol("id", "int")
+             .addCol("name", "string");
+ 
+     partCols.forEach(col -> builder.addPartCol(col, "string"));
+     Table table = builder.build(metaStore.getConf());
+ 
+     if (setPartitionLevelPrivilages) {
+       table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
+     }
+ 
+     client.createTable(table);
+     return table;
+   }
+ 
+   private void addPartition(IMetaStoreClient client, Table table, List<String> values)
+           throws TException {
+     PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
+     values.forEach(val -> partitionBuilder.addValue(val));
+     client.add_partition(partitionBuilder.build(metaStore.getConf()));
+   }
+ 
+   private List<List<String>> createTable4PartColsParts(IMetaStoreClient client) throws
+           Exception {
+     Table t = createTestTable(client, DB_NAME, TABLE_NAME, PARTCOL_SCHEMA, false);
+     List<List<String>> testValues = Lists.newArrayList(
+             Lists.newArrayList("1999", "01", "02"),
+             Lists.newArrayList("2009", "02", "10"),
+             Lists.newArrayList("2017", "10", "26"),
+             Lists.newArrayList("2017", "11", "27"));
+ 
+     for(List<String> vals : testValues){
+       addPartition(client, t, vals);
+     }
+ 
+     return testValues;
+   }
+ 
+   private static void assertPartitionsHaveCorrectValues(List<Partition> partitions,
+                                     List<List<String>> testValues) throws Exception {
+     assertEquals(testValues.size(), partitions.size());
+     for (int i = 0; i < partitions.size(); ++i) {
+       assertEquals(testValues.get(i), partitions.get(i).getValues());
+     }
+   }
+ 
+   private static void makeTestChangesOnPartition(Partition partition) {
+     partition.getParameters().put("hmsTestParam001", "testValue001");
+     partition.setCreateTime(NEW_CREATE_TIME);
+     partition.setLastAccessTime(NEW_CREATE_TIME);
+     partition.getSd().setLocation(partition.getSd().getLocation()+"/hh=01");
+     partition.getSd().getCols().add(new FieldSchema("newcol", "string", ""));
+   }
+ 
+   private void assertPartitionUnchanged(Partition partition, List<String> testValues,
+                                                List<String> partCols) throws MetaException {
+     assertFalse(partition.getParameters().containsKey("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath));
+     assertNotEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertNotEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(2, partition.getSd().getCols().size());
+   }
+ 
+   private void assertPartitionChanged(Partition partition, List<String> testValues,
+                                       List<String> partCols) throws MetaException {
+     assertEquals("testValue001", partition.getParameters().get("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath + "/hh=01"));
+     assertEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(3, partition.getSd().getCols().size());
+   }
+ 
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition) ->
+    *         alter_partition_with_environment_context(String,String,Partition,null).
+    */
+   @Test
+   public void testAlterPartition() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition oldPart = oldParts.get(3);
+ 
+     assertPartitionUnchanged(oldPart, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(oldPart);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, oldPart);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition newPart = newParts.get(3);
+     assertPartitionChanged(newPart, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+   }
+ 
+   @Test
+   public void otherCatalog() throws TException {
+     String catName = "alter_partition_catalog";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "alter_partition_database_in_other_catalog";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "table_in_other_catalog";
+     Table table = new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(catName, dbName, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere"));
+     client.alter_partitions(catName, dbName, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(catName, dbName, tableName, newPart, ec);
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+ 
+     client.dropDatabase(catName, dbName, true, true, true);
+     client.dropCatalog(catName);
+   }
+ 
+   @SuppressWarnings("deprecation")
+   @Test
+   public void deprecatedCalls() throws TException {
+     String tableName = "deprecated_table";
+     Table table = new TableBuilder()
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation("somewhere");
+     client.alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"));
+     newPart.setValues(Collections.singletonList("b3"));
+     client.renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3"));
+     Assert.assertEquals(1, fetched.getValuesSize());
+     Assert.assertEquals("b3", fetched.getValues().get(0));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("nosuch", DB_NAME, TABLE_NAME, partitions.get(3));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(null, TABLE_NAME, partitions.get(3));
++    try {
++      client.alter_partition(null, TABLE_NAME, partitions.get(3));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(DB_NAME, null, partitions.get(3));
++    try {
++      client.alter_partition(DB_NAME, null, partitions.get(3));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++    }
+   }
+ 
+   @Test
+   public void testAlterPartitionNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition,EnvironmentContext) ->
+    *         alter_partition_with_environment_context(String,String,Partition,EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.setProperties(new HashMap<String, String>(){
+       {
+         put("TestKey", "TestValue");
+       }
+     });
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = oldParts.get(3);
+ 
+     assertPartitionUnchanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(partition);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     partition = newParts.get(3);
+     assertPartitionChanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, null);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
++    try {
++      client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
 -    client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
++    try {
++      client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++    }
+   }
+ 
+   @Test
+   public void testAlterPartitionWithEnvironmentCtxNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+ 
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition)) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),null).
+    */
+   @Test
+   public void testAlterPartitions() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList());
+   }
+ 
+   @Test
+   public void testAlterPartitionsUnknownPartition() throws Exception {
+     Partition part1 = null;
+     try {
+       createTable4PartColsParts(client);
+       Table t = client.getTable(DB_NAME, TABLE_NAME);
+       PartitionBuilder builder = new PartitionBuilder();
+       Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       makeTestChangesOnPartition(part1);
+       client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+       fail("Should have thrown InvalidOperationException");
+     } catch (InvalidOperationException e) {
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       assertPartitionUnchanged(part1, part1.getValues(), PARTCOL_SCHEMA);
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
++    try {
++      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected: the exception type differs between HMS deployment types.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++      Assert.fail("didn't throw");
++    } catch (TProtocolException | MetaException e) {
++      // Expected by design: the exception type differs between HMS deployment types.
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null));
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null));
+   }
+ 
+   @Test
+   public void testAlterPartitionsNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition),EnvironmentContext) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.setProperties(new HashMap<String, String>(){
+       {
+         put("TestKey", "TestValue");
+       }
+     });
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext());
 -    client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null);
++    client.alter_partitions(DB_NAME, TABLE_NAME, newParts);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
++        -1, null, -1);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++    try {
++      client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++      Assert.fail("Expected exception");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected: the exception type differs between HMS deployment types.
++    }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++      Assert.fail("didn't throw");
++    } catch (MetaException | TProtocolException ex) {
++      // Expected by design: the exception type differs between HMS deployment types.
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   /**
+    * Testing
+    *    renamePartition(String,String,List(String),Partition) ->
+    *    rename_partition(String,String,List(String),Partition).
+    */
+   @Test
+   public void testRenamePartition() throws Exception {
+ 
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<List<String>> newValues = new ArrayList<>();
+ 
+     List<String> newVal = Lists.newArrayList("2018", "01", "16");
+     newValues.addAll(oldValues.subList(0, 3));
+     newValues.add(newVal);
+ 
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(newVal);
+     makeTestChangesOnPartition(partToRename);
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     assertPartitionsHaveCorrectValues(newParts, newValues);
+ 
+ 
+     //Asserting other partition parameters can also be changed, but not the location
+     assertFalse(newParts.get(3).getSd().getLocation().endsWith("hh=01"));
+     assertEquals(3, newParts.get(3).getSd().getCols().size());
+     assertEquals("testValue001", newParts.get(3).getParameters().get("hmsTestParam001"));
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getCreateTime());
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getLastAccessTime());
+ 
+     assertTrue(client.listPartitions(DB_NAME, TABLE_NAME, oldValues.get(3), (short)-1).isEmpty());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionTargetAlreadyExisting() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), oldParts.get(2));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoSuchOldPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("1", "2", ""), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTableInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionEmptyOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList(), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNullOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, null, partToRename);
+   }
+ 
+   @Test
+   public void testRenamePartitionNullNewPart() throws Exception {
+     try {
+       List<List<String>> oldValues = createTable4PartColsParts(client);
+       List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+ 
+       Partition partToRename = oldParts.get(3);
+       partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+       client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), null);
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionBogusCatalogName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
 -    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
++    client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename, -1, null);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition("", TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, "", oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(null, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, null, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(TABLE_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(DB_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTable() throws Exception {
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2018", "01", "16"),
+             new Partition());
+   }
+ 
+ }
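
For orientation, here is a minimal usage sketch (illustrative only, not part of the patch) of the two client calls the tests above exercise: bulk alter_partitions with an optional EnvironmentContext, and renamePartition addressed by the old value list. It assumes a connected IMetaStoreClient named client and placeholder dbName/tblName for an existing partitioned table; the imports are the same ones already shown in the test class.

    // Sketch only: dbName/tblName are placeholders, client is a connected IMetaStoreClient.
    List<Partition> parts = client.listPartitions(dbName, tblName, (short) -1);

    // Bulk alter: mutate the fetched Partition objects, then write them back in one call.
    for (Partition p : parts) {
      p.getParameters().put("hmsTestParam001", "testValue001");
    }
    client.alter_partitions(dbName, tblName, parts, new EnvironmentContext());

    // Rename: the old partition is addressed by its value list; the new values travel
    // inside the Partition object itself.
    Partition toRename = parts.get(0);
    List<String> oldVals = new ArrayList<>(toRename.getValues());
    toRename.setValues(Lists.newArrayList("2018", "01", "16"));
    client.renamePartition(dbName, tblName, oldVals, toRename);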

http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
index 0000000,8ce8531..462584a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestAppendPartitions.java
@@@ -1,0 -1,594 +1,600 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.client;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.stream.Collectors;
+ 
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.transport.TTransportException;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Tests for appending partitions.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestAppendPartitions extends MetaStoreClientTest {
+   private AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+ 
+   private static final String DB_NAME = "test_append_part_db";
+   private static Table tableWithPartitions;
+   private static Table externalTable;
+   private static Table tableNoPartColumns;
+   private static Table tableView;
+ 
+   public TestAppendPartitions(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(DB_NAME, true, true, true);
+     metaStore.cleanWarehouseDirs();
+     new DatabaseBuilder()
+         .setName(DB_NAME)
+         .create(client, metaStore.getConf());
+ 
+     tableWithPartitions = createTableWithPartitions();
+     externalTable = createExternalTable();
+     tableNoPartColumns = createTableNoPartitionColumns();
+     tableView = createView();
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   // Tests for Partition appendPartition(String tableName, String dbName, List<String> partVals) method
+ 
+   @Test
+   public void testAppendPartition() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableWithPartitions;
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition =
+         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march", "year=2017/month=may"));
+   }
+ 
+   @Test
+   public void testAppendPartitionToExternalTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = externalTable;
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition =
+         client.getPartition(table.getDbName(), table.getTableName(), partitionValues);
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, partitionValues, "year=2017/month=may");
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may"));
+   }
+ 
+   @Test
+   public void testAppendPartitionMultiplePartitions() throws Exception {
+ 
+     List<String> partitionValues1 = Lists.newArrayList("2017", "may");
+     List<String> partitionValues2 = Lists.newArrayList("2018", "may");
+     List<String> partitionValues3 = Lists.newArrayList("2017", "june");
+ 
+     Table table = tableWithPartitions;
+ 
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues1);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues2);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues3);
+ 
+     verifyPartitionNames(table,
+         Lists.newArrayList("year=2017/month=may", "year=2018/month=may", "year=2017/month=june",
+             "year=2017/month=march", "year=2017/month=april", "year=2018/month=march"));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionToTableWithoutPartCols() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableNoPartColumns;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionToView() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     Table table = tableView;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testAppendPartitionAlreadyExists() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "april");
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionNonExistingDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionNonExistingTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionEmptyDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition("", tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionEmptyTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), "", partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullDB() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(null, tableWithPartitions.getTableName(), partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullTable() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2017", "may");
+     client.appendPartition(tableWithPartitions.getDbName(), null, partitionValues);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionEmptyPartValues() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), new ArrayList<>());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartitionNullPartValues() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), (List<String>) null);
+   }
+ 
+   @Test
+   public void testAppendPartitionLessPartValues() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2019");
+     Table table = tableWithPartitions;
+ 
+     try {
+       client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+       Assert.fail("Exception should have been thrown.");
+     } catch (MetaException e) {
+       // Expected exception
+     }
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march"));
+     String partitionLocation = table.getSd().getLocation() + "/year=2019";
+     Assert.assertFalse(metaStore.isPathExists(new Path(partitionLocation)));
+   }
+ 
+   @Test
+   public void testAppendPartitionMorePartValues() throws Exception {
+ 
+     List<String> partitionValues = Lists.newArrayList("2019", "march", "12");
+     Table table = tableWithPartitions;
+ 
+     try {
+       client.appendPartition(table.getDbName(), table.getTableName(), partitionValues);
+       Assert.fail("Exception should have been thrown.");
+     } catch (MetaException e) {
+       // Expected exception
+     }
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march"));
+     String partitionLocation = tableWithPartitions.getSd().getLocation() + "/year=2019";
+     Assert.assertFalse(metaStore.isPathExists(new Path(partitionLocation)));
+   }
+ 
+   // Tests for Partition appendPartition(String tableName, String dbName, String name) method
+ 
+   @Test
+   public void testAppendPart() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     String partitionName = "year=2017/month=may";
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
+         getPartitionValues(partitionName));
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
+     verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
+         "year=2018/month=march", partitionName));
+   }
+ 
+   @Test
+   public void testAppendPartToExternalTable() throws Exception {
+ 
+     Table table = externalTable;
+     String partitionName = "year=2017/month=may";
+ 
+     Partition appendedPart =
+         client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+ 
+     Assert.assertNotNull(appendedPart);
+     Partition partition = client.getPartition(table.getDbName(), table.getTableName(),
+         getPartitionValues(partitionName));
++    appendedPart.setWriteId(partition.getWriteId());
+     Assert.assertEquals(partition, appendedPart);
+     verifyPartition(partition, table, getPartitionValues(partitionName), partitionName);
+     verifyPartitionNames(table, Lists.newArrayList(partitionName));
+   }
+ 
+   @Test
+   public void testAppendPartMultiplePartitions() throws Exception {
+ 
+     String partitionName1 = "year=2017/month=may";
+     String partitionName2 = "year=2018/month=may";
+     String partitionName3 = "year=2017/month=june";
+     Table table = tableWithPartitions;
+ 
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName1);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName2);
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName3);
+ 
+     verifyPartitionNames(table, Lists.newArrayList(partitionName1, partitionName2, partitionName3,
+         "year=2017/month=march", "year=2017/month=april", "year=2018/month=march"));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartToTableWithoutPartCols() throws Exception {
+ 
+     String partitionName = "year=2017/month=may";
+     Table table = tableNoPartColumns;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartToView() throws Exception {
+ 
+     String partitionName = "year=2017/month=may";
+     Table table = tableView;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = AlreadyExistsException.class)
+   public void testAppendPartAlreadyExists() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartNonExistingDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition("nonexistingdb", tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartNonExistingTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), "nonexistingtable", partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartEmptyDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition("", tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartEmptyTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), "", partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullDB() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(null, tableWithPartitions.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullTable() throws Exception {
+ 
+     String partitionName = "year=2017/month=april";
+     client.appendPartition(tableWithPartitions.getDbName(), null, partitionName);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartEmptyPartName() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), "");
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAppendPartNullPartName() throws Exception {
+ 
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), (String) null);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartLessPartValues() throws Exception {
+ 
+     String partitionName = "year=2019";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test
+   public void testAppendPartMorePartValues() throws Exception {
+ 
+     String partitionName = "year=2019/month=march/day=12";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartInvalidPartName() throws Exception {
+ 
+     String partitionName = "invalidpartname";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartWrongColumnInPartName() throws Exception {
+ 
+     String partitionName = "year=2019/honap=march";
+     Table table = tableWithPartitions;
+     client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
+   }
+ 
+   @Test
+   public void otherCatalog() throws TException {
+     String catName = "append_partition_catalog";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "append_partition_database_in_other_catalog";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "table_in_other_catalog";
+     new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition created =
+         client.appendPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(1, created.getValuesSize());
+     Assert.assertEquals("a1", created.getValues().get(0));
+     Partition fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
++    created.setWriteId(fetched.getWriteId());
+     Assert.assertEquals(created, fetched);
+ 
+     created = client.appendPartition(catName, dbName, tableName, "partcol=a2");
+     Assert.assertEquals(1, created.getValuesSize());
+     Assert.assertEquals("a2", created.getValues().get(0));
+     fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
++    created.setWriteId(fetched.getWriteId());
+     Assert.assertEquals(created, fetched);
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionBogusCatalog() throws Exception {
+     client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(),
+         Lists.newArrayList("2017", "may"));
+   }
+ 
+   @Test(expected = InvalidObjectException.class)
+   public void testAppendPartitionByNameBogusCatalog() throws Exception {
+     client.appendPartition("nosuch", DB_NAME, tableWithPartitions.getTableName(),
+         "year=2017/month=april");
+   }
+ 
+   // Helper methods
+ 
+   private Table createTableWithPartitions() throws Exception {
+     Table table = createTable("test_append_part_table_with_parts", getYearAndMonthPartCols(), null,
+         TableType.MANAGED_TABLE.name(),
+         metaStore.getWarehouseRoot() + "/test_append_part_table_with_parts");
+     createPartition(table, Lists.newArrayList("2017", "march"));
+     createPartition(table, Lists.newArrayList("2017", "april"));
+     createPartition(table, Lists.newArrayList("2018", "march"));
+     return table;
+   }
+ 
+   private Table createTableNoPartitionColumns() throws Exception {
+     Table table = createTable("test_append_part_table_no_part_columns", null, null, "MANAGED_TABLE",
+         metaStore.getWarehouseRoot() + "/test_append_part_table_no_part_columns");
+     return table;
+   }
+ 
+   private Table createExternalTable() throws Exception {
+     Map<String, String> tableParams = new HashMap<>();
+     tableParams.put("EXTERNAL", "TRUE");
+     Table table = createTable("test_append_part_external_table", getYearAndMonthPartCols(),
+         tableParams, TableType.EXTERNAL_TABLE.name(),
+         metaStore.getWarehouseRoot() + "/test_append_part_external_table");
+     return table;
+   }
+ 
+   private Table createView() throws Exception {
+     Table table = createTable("test_append_part_table_view", getYearAndMonthPartCols(), null,
+         TableType.VIRTUAL_VIEW.name(), null);
+     return table;
+   }
+ 
+   private Table createTable(String tableName, List<FieldSchema> partCols, Map<String,
+       String> tableParams, String tableType, String location) throws Exception {
+     new TableBuilder()
+         .setDbName(DB_NAME)
+         .setTableName(tableName)
+         .addCol("test_id", "int", "test col id")
+         .addCol("test_value", "string", "test col value")
+         .setPartCols(partCols)
+         .setTableParams(tableParams)
+         .setType(tableType)
+         .setLocation(location)
+         .create(client, metaStore.getConf());
+     return client.getTable(DB_NAME, tableName);
+   }
+ 
+   private void createPartition(Table table, List<String> values) throws Exception {
+     new PartitionBuilder()
+         .inTable(table)
+         .setValues(values)
+         .addToTable(client, metaStore.getConf());
+   }
+ 
+   private static List<FieldSchema> getYearAndMonthPartCols() {
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema("year", "string", "year part col"));
+     cols.add(new FieldSchema("month", "string", "month part col"));
+     return cols;
+   }
+ 
+   private static List<String> getPartitionValues(String partitionsName) {
+     List<String> values = new ArrayList<>();
+     if (StringUtils.isEmpty(partitionsName)) {
+       return values;
+     }
+     values = Arrays.stream(partitionsName.split("/")).map(v -> v.split("=")[1])
+         .collect(Collectors.toList());
+     return values;
+   }
+ 
+   private void verifyPartition(Partition partition, Table table, List<String> expectedPartValues,
+       String partitionName) throws Exception {
+     Assert.assertEquals(table.getTableName(), partition.getTableName());
+     Assert.assertEquals(table.getDbName(), partition.getDbName());
+     Assert.assertEquals(expectedPartValues, partition.getValues());
+     Assert.assertNotEquals(0, partition.getCreateTime());
+     Assert.assertEquals(0, partition.getLastAccessTime());
+     Assert.assertEquals(1, partition.getParameters().size());
+     Assert.assertTrue(partition.getParameters().containsKey("transient_lastDdlTime"));
+     StorageDescriptor partitionSD = partition.getSd();
+     Assert.assertEquals(table.getSd().getLocation() + "/" + partitionName,
+         partitionSD.getLocation());
+     partition.getSd().setLocation(table.getSd().getLocation());
+     Assert.assertEquals(table.getSd(), partitionSD);
+     Assert.assertTrue(metaStore.isPathExists(new Path(partitionSD.getLocation())));
+   }
+ 
+   private void verifyPartitionNames(Table table, List<String> expectedPartNames) throws Exception {
+     List<String> partitionNames =
+         client.listPartitionNames(table.getDbName(), table.getTableName(), (short) -1);
+     Assert.assertEquals(expectedPartNames.size(), partitionNames.size());
+     Assert.assertTrue(partitionNames.containsAll(expectedPartNames));
+   }
+ }
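
A note on the setWriteId calls added to the assertions above: on this branch the Partition returned by appendPartition does not necessarily carry the write id that a subsequent getPartition reports, so the tests normalize that single field before comparing the whole objects. A minimal sketch of the pattern (placeholder names, connected client assumed):

    // Sketch only: dbName/tblName are placeholders, client is a connected IMetaStoreClient.
    Partition appended = client.appendPartition(dbName, tblName, Lists.newArrayList("2017", "may"));
    Partition stored = client.getPartition(dbName, tblName, Lists.newArrayList("2017", "may"));

    // Align the one field that legitimately differs, then compare everything else.
    appended.setWriteId(stored.getWriteId());
    Assert.assertEquals(stored, appended);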


[39/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0721

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0721


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5c8b5d8b
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5c8b5d8b
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5c8b5d8b

Branch: refs/heads/master
Commit: 5c8b5d8b62beaffedd0681683638bf8bb7d98402
Parents: ddef895 788daf6
Author: sergey <se...@apache.org>
Authored: Sat Jul 21 12:51:36 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sat Jul 21 12:51:36 2018 -0700

----------------------------------------------------------------------
 .../test/resources/testconfiguration.properties |   1 +
 .../hive/ql/optimizer/GenMRTableScan1.java      |   5 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |  12 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |   1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |  11 +-
 .../hive/ql/parse/ProcessAnalyzeTable.java      |   5 +-
 .../hadoop/hive/ql/parse/TaskCompiler.java      |   6 +-
 .../parse/spark/SparkProcessAnalyzeTable.java   |   5 +-
 .../hive/ql/stats/BasicStatsNoJobTask.java      |   9 +
 .../queries/clientpositive/acid_no_buckets.q    |   8 +
 .../clientpositive/truncate_external_force.q    |  16 ++
 .../clientpositive/acid_table_stats.q.out       |  16 +-
 .../materialized_view_create_rewrite.q.out      |  58 ++---
 .../clientpositive/llap/acid_no_buckets.q.out   | 210 +++++++++++++++++--
 .../materialized_view_create_rewrite_4.q.out    |   6 +-
 .../materialized_view_create_rewrite_5.q.out    |   2 +-
 .../llap/truncate_external_force.q.out          |  85 ++++++++
 17 files changed, 393 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5c8b5d8b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/5c8b5d8b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/5c8b5d8b/ql/src/test/results/clientpositive/acid_table_stats.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/acid_table_stats.q.out
index f3ada34,2fe4f97..32badd0
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@@ -174,10 -211,9 +176,10 @@@ Database:           	defaul
  Table:              	acid                	 
  #### A masked pattern was here ####
  Partition Parameters:	 	 
 +	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
  	numFiles            	2                   
  	numRows             	1000                
- 	rawDataSize         	208000              
+ 	rawDataSize         	0                   
  	totalSize           	4063                
  #### A masked pattern was here ####
  	 	 
@@@ -225,10 -261,10 +227,10 @@@ Database:           	defaul
  Table:              	acid                	 
  #### A masked pattern was here ####
  Partition Parameters:	 	 
 -	COLUMN_STATS_ACCURATE	{\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
 +	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
  	numFiles            	2                   
  	numRows             	1000                
- 	rawDataSize         	208000              
+ 	rawDataSize         	0                   
  	totalSize           	4063                
  #### A masked pattern was here ####
  	 	 
@@@ -317,11 -389,10 +319,11 @@@ Database:           	defaul
  Table:              	acid                	 
  #### A masked pattern was here ####
  Partition Parameters:	 	 
 +	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
  	numFiles            	4                   
 -	numRows             	3000                
 +	numRows             	2000                
- 	rawDataSize         	208000              
+ 	rawDataSize         	0                   
 -	totalSize           	8118                
 +	totalSize           	8126                
  #### A masked pattern was here ####
  	 	 
  # Storage Information	 	 
@@@ -364,11 -437,10 +368,11 @@@ Database:           	defaul
  Table:              	acid                	 
  #### A masked pattern was here ####
  Partition Parameters:	 	 
 +	COLUMN_STATS_ACCURATE	{\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"key\":\"true\",\"value\":\"true\"}}
  	numFiles            	4                   
  	numRows             	2000                
- 	rawDataSize         	416000              
+ 	rawDataSize         	0                   
 -	totalSize           	8118                
 +	totalSize           	8126                
  #### A masked pattern was here ####
  	 	 
  # Storage Information	 	 
@@@ -433,11 -541,11 +437,11 @@@ STAGE PLANS
            TableScan
              alias: acid
              filterExpr: (ds = '2008-04-08') (type: boolean)
-             Statistics: Num rows: 2000 Data size: 416000 Basic stats: COMPLETE Column stats: NONE
 -            Statistics: Num rows: 2000 Data size: 81180 Basic stats: COMPLETE Column stats: NONE
++            Statistics: Num rows: 2000 Data size: 81260 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                expressions: key (type: string)
                outputColumnNames: key
-               Statistics: Num rows: 2000 Data size: 416000 Basic stats: COMPLETE Column stats: NONE
 -              Statistics: Num rows: 2000 Data size: 81180 Basic stats: COMPLETE Column stats: NONE
++              Statistics: Num rows: 2000 Data size: 81260 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
                  aggregations: max(key)
                  mode: hash

http://git-wip-us.apache.org/repos/asf/hive/blob/5c8b5d8b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
----------------------------------------------------------------------
diff --cc ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
index fe46bfd,bfa204e..42bfa4b
--- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
+++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_4.q.out
@@@ -949,8 -948,8 +949,8 @@@ Table Parameters
  	bucketing_version   	2                   
  	numFiles            	3                   
  	numRows             	3                   
- 	rawDataSize         	248                 
+ 	rawDataSize         	0                   
 -	totalSize           	1508                
 +	totalSize           	1500                
  	transactional       	true                
  	transactional_properties	default             
  #### A masked pattern was here ####

http://git-wip-us.apache.org/repos/asf/hive/blob/5c8b5d8b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out
----------------------------------------------------------------------


[41/50] [abbrv] hive git commit: HIVE-19915 : support txn stats in CachedStore (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19915 : support txn stats in CachedStore (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/64d75a43
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/64d75a43
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/64d75a43

Branch: refs/heads/master
Commit: 64d75a43d68edd4dfe27965e99e85bb575eda705
Parents: ba083ed
Author: sergey <se...@apache.org>
Authored: Sat Jul 21 13:43:20 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sat Jul 21 13:43:20 2018 -0700

----------------------------------------------------------------------
 .../hadoop/hive/metastore/HiveMetaStore.java    |   4 +-
 .../hadoop/hive/metastore/ObjectStore.java      |  87 ++++----
 .../apache/hadoop/hive/metastore/RawStore.java  |  13 +-
 .../hive/metastore/cache/CachedStore.java       | 202 +++++++++++++------
 .../hive/metastore/cache/SharedCache.java       |  48 +++++
 .../DummyRawStoreControlledCommit.java          |  16 +-
 .../DummyRawStoreForJdoConnection.java          |  17 +-
 7 files changed, 261 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index eb4eb1b..fb6029f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -5831,7 +5831,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
 
       boolean ret = false;
       try {
-        ret = getMS().updateTableColumnStatistics(colStats, validWriteIds, writeId);
+        ret = getMS().updateTableColumnStatistics(colStats, validWriteIds, writeId) != null;
       } finally {
         endFunction("write_column_statistics", ret != false, null,
             colStats.getStatsDesc().getTableName());
@@ -5875,7 +5875,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
         }
         List<String> partVals = getPartValsFromName(tbl, csd.getPartName());
         return getMS().updatePartitionColumnStatistics(
-            colStats, partVals, validWriteIds, writeId);
+            colStats, partVals, validWriteIds, writeId) != null;
       } finally {
         endFunction("write_partition_column_statistics", ret != false, null, tableName);
       }
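
In the HiveMetaStore hunk above, the Thrift-facing handler keeps reporting a boolean while the underlying RawStore call now returns the updated parameters; the boolean is simply derived from a null check on that return value. A minimal sketch of that adaptation, under the assumption of a simplified store interface (SimpleStoreSketch and ThriftFacadeSketch are illustrative stand-ins, not Hive classes):

  import java.util.Map;

  // Sketch only: a store whose update call returns the new parameters on success, or null otherwise.
  interface SimpleStoreSketch {
    Map<String, String> updateTableColumnStatistics(String tableName, Map<String, String> stats);
  }

  class ThriftFacadeSketch {
    private final SimpleStoreSketch store;

    ThriftFacadeSketch(SimpleStoreSketch store) {
      this.store = store;
    }

    // The outward-facing API still reports a boolean, derived from the richer return value,
    // mirroring "ret = getMS().updateTableColumnStatistics(...) != null" in the hunk above.
    boolean writeTableColumnStatistics(String tableName, Map<String, String> stats) {
      Map<String, String> newParams = store.updateTableColumnStatistics(tableName, stats);
      return newParams != null;
    }
  }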

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 9c46b68..d522a4e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -2433,7 +2433,7 @@ public class ObjectStore implements RawStore, Configurable {
   @Override
   public Partition getPartition(String catName, String dbName, String tableName,
                                 List<String> part_vals,
-                                String writeIdList)
+                                String validWriteIds)
       throws NoSuchObjectException, MetaException {
     openTransaction();
     MTable table = this.getMTable(catName, dbName, tableName);
@@ -2447,13 +2447,14 @@ public class ObjectStore implements RawStore, Configurable {
     part.setValues(part_vals);
     // If transactional table partition, check whether the current version partition
     // statistics in the metastore comply with the client query's snapshot isolation.
+    long statsWriteId = mpart.getWriteId();
     if (TxnUtils.isTransactionalTable(table.getParameters())) {
       if (!areTxnStatsSupported) {
         // Do not make persistent the following state since it is query specific (not global).
         StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
         LOG.info("Removed COLUMN_STATS_ACCURATE from Partition object's parameters.");
-      } else if (writeIdList != null) {
-        if (isCurrentStatsValidForTheQuery(part, mpart.getWriteId(), writeIdList, false)) {
+      } else if (validWriteIds != null) {
+        if (isCurrentStatsValidForTheQuery(part, statsWriteId, validWriteIds, false)) {
           part.setIsStatsCompliant(true);
         } else {
           part.setIsStatsCompliant(false);
@@ -4091,7 +4092,7 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public void alterTable(String catName, String dbname, String name, Table newTable,
+  public Table alterTable(String catName, String dbname, String name, Table newTable,
       String queryValidWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     boolean registerCreationSignature = false;
@@ -4160,6 +4161,7 @@ public class ObjectStore implements RawStore, Configurable {
           oldt.setWriteId(newTable.getWriteId());
         }
       }
+      newTable = convertToTable(oldt);
 
       // commit the changes
       success = commitTransaction();
@@ -4168,6 +4170,7 @@ public class ObjectStore implements RawStore, Configurable {
         rollbackTransaction();
       }
     }
+    return newTable;
   }
 
   /**
@@ -4220,18 +4223,21 @@ public class ObjectStore implements RawStore, Configurable {
     }
   }
 
+  private static final class Ref<T> {
+    public T t;
+  }
+
   /**
    * Alters an existing partition. Initiates copy of SD. Returns the old CD.
    * @param dbname
    * @param name
    * @param part_vals Partition values (of the original partition instance)
    * @param newPart Partition object containing new information
-   * @return The column descriptor of the old partition instance (null if table is a view)
    * @throws InvalidObjectException
    * @throws MetaException
    */
-  private MColumnDescriptor alterPartitionNoTxn(String catName, String dbname, String name,
-    List<String> part_vals, Partition newPart, String queryValidWriteIds)
+  private Partition alterPartitionNoTxn(String catName, String dbname, String name,
+    List<String> part_vals, Partition newPart, String validWriteIds, Ref<MColumnDescriptor> oldCd)
       throws InvalidObjectException, MetaException {
     catName = normalizeIdentifier(catName);
     name = normalizeIdentifier(name);
@@ -4253,7 +4259,7 @@ public class ObjectStore implements RawStore, Configurable {
     if (isTxn && areTxnStatsSupported) {
       // Transactional table is altered without a txn. Make sure there are no changes to the flag.
       String errorMsg = verifyStatsChangeCtx(oldp.getParameters(), newPart.getParameters(),
-          newPart.getWriteId(), queryValidWriteIds, false);
+          newPart.getWriteId(), validWriteIds, false);
       if (errorMsg != null) {
         throw new MetaException(errorMsg);
       }
@@ -4274,9 +4280,9 @@ public class ObjectStore implements RawStore, Configurable {
     if (isTxn) {
       if (!areTxnStatsSupported) {
         StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
-      } else if (queryValidWriteIds != null && newPart.getWriteId() > 0) {
+      } else if (validWriteIds != null && newPart.getWriteId() > 0) {
         // Check concurrent INSERT case and set false to the flag.
-        if (!isCurrentStatsValidForTheQuery(oldp, queryValidWriteIds, true)) {
+        if (!isCurrentStatsValidForTheQuery(oldp, validWriteIds, true)) {
           StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
           LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " +
                   dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent.");
@@ -4285,22 +4291,24 @@ public class ObjectStore implements RawStore, Configurable {
       }
     }
 
-    return oldCD;
+    oldCd.t = oldCD;
+    return convertToPart(oldp);
   }
 
   @Override
-  public void alterPartition(String catName, String dbname, String name, List<String> part_vals,
-      Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+  public Partition alterPartition(String catName, String dbname, String name, List<String> part_vals,
+      Partition newPart, String validWriteIds) throws InvalidObjectException, MetaException {
     boolean success = false;
     Throwable e = null;
+    Partition result = null;
     try {
       openTransaction();
       if (newPart.isSetWriteId()) {
         LOG.warn("Alter partitions with write ID called without transaction information");
       }
-      MColumnDescriptor oldCd = alterPartitionNoTxn(
-          catName, dbname, name, part_vals, newPart, queryValidWriteIds);
-      removeUnusedColumnDescriptor(oldCd);
+      Ref<MColumnDescriptor> oldCd = new Ref<MColumnDescriptor>();
+      result = alterPartitionNoTxn(catName, dbname, name, part_vals, newPart, validWriteIds, oldCd);
+      removeUnusedColumnDescriptor(oldCd.t);
       // commit the changes
       success = commitTransaction();
     } catch (Throwable exception) {
@@ -4317,28 +4325,33 @@ public class ObjectStore implements RawStore, Configurable {
         throw metaException;
       }
     }
+    return result;
   }
 
   @Override
-  public void alterPartitions(String catName, String dbname, String name,
+  public List<Partition> alterPartitions(String catName, String dbname, String name,
                               List<List<String>> part_vals, List<Partition> newParts,
                               long writeId, String queryWriteIdList)
                                   throws InvalidObjectException, MetaException {
     boolean success = false;
     Exception e = null;
+    List<Partition> results = new ArrayList<>(newParts.size());
     try {
       openTransaction();
       Iterator<List<String>> part_val_itr = part_vals.iterator();
       Set<MColumnDescriptor> oldCds = new HashSet<>();
+      Ref<MColumnDescriptor> oldCdRef = new Ref<>();
       for (Partition tmpPart: newParts) {
         List<String> tmpPartVals = part_val_itr.next();
         if (writeId > 0) {
           tmpPart.setWriteId(writeId);
         }
-        MColumnDescriptor oldCd = alterPartitionNoTxn(
-            catName, dbname, name, tmpPartVals, tmpPart, queryWriteIdList);
-        if (oldCd != null) {
-          oldCds.add(oldCd);
+        oldCdRef.t = null;
+        Partition result = alterPartitionNoTxn(
+            catName, dbname, name, tmpPartVals, tmpPart, queryWriteIdList, oldCdRef);
+        results.add(result);
+        if (oldCdRef.t != null) {
+          oldCds.add(oldCdRef.t);
         }
       }
       for (MColumnDescriptor oldCd : oldCds) {
@@ -4360,6 +4373,7 @@ public class ObjectStore implements RawStore, Configurable {
         throw metaException;
       }
     }
+    return results;
   }
 
   private void copyMSD(MStorageDescriptor newSd, MStorageDescriptor oldSd) {
@@ -8408,7 +8422,7 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats,
+  public Map<String, String> updateTableColumnStatistics(ColumnStatistics colStats,
       String validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
@@ -8467,7 +8481,8 @@ public class ObjectStore implements RawStore, Configurable {
       oldt.setParameters(newParams);
 
       committed = commitTransaction();
-      return committed;
+      // TODO: similar to update...Part, this used to do "return committed;"; makes little sense.
+      return committed ? newParams : null;
     } finally {
       if (!committed) {
         rollbackTransaction();
@@ -8502,8 +8517,8 @@ public class ObjectStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
-      String validWriteIds, long writeId)
+  public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics colStats,
+      List<String> partVals, String validWriteIds, long writeId)
           throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
     boolean committed = false;
 
@@ -8561,7 +8576,8 @@ public class ObjectStore implements RawStore, Configurable {
 
       mPartition.setParameters(newParams);
       committed = commitTransaction();
-      return committed;
+      // TODO: what is the "return committed;" about? would it ever return false without throwing?
+      return committed ? newParams : null;
     } finally {
       if (!committed) {
         rollbackTransaction();
@@ -8828,8 +8844,6 @@ public class ObjectStore implements RawStore, Configurable {
         return null;
       }
 
-      // TODO: this should probably also return stats for partitions with valid stats,
-      //       and no stats for partitions with invalid stats.
       // Loop through the given "partNames" list
       // checking isolation-level-compliance of each partition column stats.
       for (String partName : partNames) {
@@ -12420,8 +12434,8 @@ public class ObjectStore implements RawStore, Configurable {
    */
   private boolean isCurrentStatsValidForTheQuery(MTable tbl, String queryValidWriteIdList,
       boolean isCompleteStatsWriter) throws MetaException {
-    return isCurrentStatsValidForTheQuery(conf, tbl.getDatabase().getName(), tbl.getTableName(),
-        tbl.getParameters(), tbl.getWriteId(), queryValidWriteIdList, isCompleteStatsWriter);
+    return isCurrentStatsValidForTheQuery(conf, tbl.getParameters(), tbl.getWriteId(),
+        queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   /**
@@ -12441,22 +12455,21 @@ public class ObjectStore implements RawStore, Configurable {
   private boolean isCurrentStatsValidForTheQuery(MPartition part,
       String queryValidWriteIdList, boolean isCompleteStatsWriter)
       throws MetaException {
-    return isCurrentStatsValidForTheQuery(conf, part.getTable().getDatabase().getName(),
-        part.getTable().getTableName(), part.getParameters(), part.getWriteId(),
+    return isCurrentStatsValidForTheQuery(conf, part.getParameters(), part.getWriteId(),
         queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   private boolean isCurrentStatsValidForTheQuery(Partition part, long partWriteId,
       String queryValidWriteIdList, boolean isCompleteStatsWriter)
       throws MetaException {
-    return isCurrentStatsValidForTheQuery(conf, part.getDbName(), part.getTableName(),
-        part.getParameters(), partWriteId, queryValidWriteIdList, isCompleteStatsWriter);
+    return isCurrentStatsValidForTheQuery(conf, part.getParameters(), partWriteId,
+        queryValidWriteIdList, isCompleteStatsWriter);
   }
 
   // TODO: move to somewhere else
-  public static boolean isCurrentStatsValidForTheQuery(Configuration conf, String dbName,
-      String tblName, Map<String, String> statsParams, long statsWriteId,
-      String queryValidWriteIdList, boolean isCompleteStatsWriter) throws MetaException {
+  public static boolean isCurrentStatsValidForTheQuery(Configuration conf,
+      Map<String, String> statsParams, long statsWriteId, String queryValidWriteIdList,
+      boolean isCompleteStatsWriter) throws MetaException {
 
     // Note: can be changed to debug/info to verify the calls.
     // TODO## change this to debug when merging
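
The ObjectStore change above threads a small Ref<T> holder through alterPartitionNoTxn so the method can return the altered Partition while still handing back the old column descriptor for later cleanup. A standalone sketch of that out-parameter pattern follows; apart from Ref itself, the type names (OldDescriptor, Item, AlterSketch) are illustrative stand-ins, not Hive classes:

  import java.util.ArrayList;
  import java.util.HashSet;
  import java.util.List;
  import java.util.Set;

  // Holder used as an out-parameter, as in the ObjectStore hunk above.
  final class Ref<T> {
    public T t;
  }

  class AlterSketch {
    static final class OldDescriptor { }
    static final class Item { }

    // Returns the altered item; the descriptor being replaced is reported through the Ref holder.
    static Item alterOneNoTxn(Item newItem, Ref<OldDescriptor> oldDescOut) {
      oldDescOut.t = new OldDescriptor();  // capture the descriptor that is being swapped out
      return newItem;                      // in ObjectStore this corresponds to convertToPart(oldp)
    }

    static List<Item> alterMany(List<Item> newItems) {
      List<Item> results = new ArrayList<>(newItems.size());
      Set<OldDescriptor> oldDescs = new HashSet<>();
      Ref<OldDescriptor> oldDescRef = new Ref<>();
      for (Item item : newItems) {
        oldDescRef.t = null;               // reset the holder for each iteration
        results.add(alterOneNoTxn(item, oldDescRef));
        if (oldDescRef.t != null) {
          oldDescs.add(oldDescRef.t);      // collect descriptors and clean them up once, after the loop
        }
      }
      // cleanup of oldDescs would happen here (removeUnusedColumnDescriptor in the real code)
      return results;
    }
  }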

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 46082a5..8d647a0 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -358,10 +358,11 @@ public interface RawStore extends Configurable {
    * @param name name of the table.
    * @param newTable New table object.  Which parts of the table can be altered are
    *                 implementation specific.
+   * @return the altered table, as it is stored in the metastore after the update
    * @throws InvalidObjectException The new table object is invalid.
    * @throws MetaException something went wrong, usually in the RDBMS or storage.
    */
-  void alterTable(String catName, String dbname, String name, Table newTable,
+  Table alterTable(String catName, String dbname, String name, Table newTable,
       String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
@@ -499,10 +500,11 @@ public interface RawStore extends Configurable {
    * @param part_vals partition values that describe the partition.
    * @param new_part new partition object.  This should be a complete copy of the old with
    *                 changes values, not just the parts to update.
+   * @return the altered partition, as it is stored in the metastore after the update
    * @throws InvalidObjectException No such partition.
    * @throws MetaException error accessing the RDBMS.
    */
-  void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
+  Partition alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
       Partition new_part, String queryValidWriteIds)
           throws InvalidObjectException, MetaException;
 
@@ -519,10 +521,11 @@ public interface RawStore extends Configurable {
    * @param txnId transaction id of the transaction that called this method.
    * @param writeIdList valid write id list of the transaction on the current table
    * @param writeid write id of the transaction for the table
+   * @return the altered partitions, as they are stored in the metastore after the update
    * @throws InvalidObjectException One of the indicated partitions does not exist.
    * @throws MetaException error accessing the RDBMS.
    */
-  void alterPartitions(String catName, String db_name, String tbl_name,
+  List<Partition> alterPartitions(String catName, String db_name, String tbl_name,
       List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
       String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
@@ -864,7 +867,7 @@ public interface RawStore extends Configurable {
    * @throws InvalidObjectException the stats object is invalid
    * @throws InvalidInputException unable to record the stats for the table
    */
-  boolean updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId)
+  Map<String, String> updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
 
   /** Persists the given column statistics object to the metastore
@@ -877,7 +880,7 @@ public interface RawStore extends Configurable {
    * @throws InvalidInputException unable to record the stats for the table
    * @throws TException
    */
-  boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+  Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,
      List<String> partVals, String validWriteIds, long writeId)
      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
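
The RawStore signature changes above let a decorating store reuse what the backing store just persisted instead of re-reading it. A hedged sketch of that decorator shape, with StoreSketch and CachingStoreSketch as illustrative stand-ins for RawStore and the cache layer (not Hive APIs):

  import java.util.Map;

  // Sketch only: a store whose update call returns the new parameters (null if nothing was committed).
  interface StoreSketch {
    Map<String, String> updateTableColumnStatistics(String tblName, Map<String, String> stats);
  }

  class CachingStoreSketch implements StoreSketch {
    private final StoreSketch backing;
    private final Map<String, Map<String, String>> cachedTableParams;

    CachingStoreSketch(StoreSketch backing, Map<String, Map<String, String>> cache) {
      this.backing = backing;
      this.cachedTableParams = cache;
    }

    @Override
    public Map<String, String> updateTableColumnStatistics(String tblName, Map<String, String> stats) {
      Map<String, String> newParams = backing.updateTableColumnStatistics(tblName, stats);
      if (newParams != null) {
        // No second round trip: the returned parameters are applied to the cache directly,
        // the way CachedStore calls table.setParameters(newParams) in the hunks below.
        cachedTableParams.put(tblName, newParams);
      }
      return newParams;
    }
  }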
 

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index dd705a5..f73047f 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFa
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.FileUtils;
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
@@ -92,6 +93,7 @@ public class CachedStore implements RawStore, Configurable {
   private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
   private RawStore rawStore = null;
   private Configuration conf;
+  private boolean areTxnStatsSupported;
   private PartitionExpressionProxy expressionProxy = null;
   private static final SharedCache sharedCache = new SharedCache();
 
@@ -129,6 +131,7 @@ public class CachedStore implements RawStore, Configurable {
     rawStore.setConf(conf);
     Configuration oldConf = this.conf;
     this.conf = conf;
+    this.areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED);
     if (expressionProxy != null && conf != oldConf) {
       LOG.warn("Unexpected setConf when we were already configured");
     } else {
@@ -279,7 +282,6 @@ public class CachedStore implements RawStore, Configurable {
                     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
                 Deadline.stopTimer();
               }
-              // TODO## should this take write ID into account? or at least cache write ID to verify?
               // If the table could not cached due to memory limit, stop prewarm
               boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions,
                   partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition);
@@ -542,24 +544,33 @@ public class CachedStore implements RawStore, Configurable {
 
 
     private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+      boolean committed = false;
+      rawStore.openTransaction();
       try {
         Table table = rawStore.getTable(catName, dbName, tblName);
         if (!table.isSetPartitionKeys()) {
           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
           Deadline.startTimer("getTableColumnStatistics");
-          // TODO## should this take write ID into account? or at least cache write ID to verify?
+
           ColumnStatistics tableColStats =
               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
           Deadline.stopTimer();
           if (tableColStats != null) {
-            // TODO## should this take write ID into account? or at least cache write ID to verify?
             sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
                 StringUtils.normalizeIdentifier(dbName),
                 StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
+            // Update the table to get consistent stats state.
+            sharedCache.alterTableInCache(catName, dbName, tblName, table);
           }
         }
+        committed = rawStore.commitTransaction();
       } catch (MetaException | NoSuchObjectException e) {
         LOG.info("Unable to refresh table column stats for table: " + tblName, e);
+      } finally {
+        if (!committed) {
+          sharedCache.removeAllTableColStatsFromCache(catName, dbName, tblName);
+          rawStore.rollbackTransaction();
+        }
       }
     }
 
@@ -577,19 +588,31 @@ public class CachedStore implements RawStore, Configurable {
     }
 
     private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+      boolean committed = false;
+      rawStore.openTransaction();
       try {
         Table table = rawStore.getTable(catName, dbName, tblName);
         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
         // Get partition column stats for this table
         Deadline.startTimer("getPartitionColumnStatistics");
-        // TODO## should this take write ID into account? or at least cache write ID to verify?
         List<ColumnStatistics> partitionColStats =
             rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
         Deadline.stopTimer();
         sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+        List<Partition> parts = rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+        // Also save partitions for consistency as they have the stats state.
+        for (Partition part : parts) {
+          sharedCache.alterPartitionInCache(catName, dbName, tblName, part.getValues(), part);
+        }
+        committed = rawStore.commitTransaction();
       } catch (MetaException | NoSuchObjectException e) {
         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+      } finally {
+        if (!committed) {
+          sharedCache.removeAllPartitionColStatsFromCache(catName, dbName, tblName);
+          rawStore.rollbackTransaction();
+        }
       }
     }
 
@@ -828,31 +851,32 @@ public class CachedStore implements RawStore, Configurable {
     return getTable(catName, dbName, tblName, null);
   }
 
-  // TODO: if writeIdList is not null, check isolation level compliance for SVS,
-  // possibly with getTableFromCache() with table snapshot in cache.
   @Override
   public Table getTable(String catName, String dbName, String tblName,
-                        String writeIdList)
+                        String validWriteIds)
       throws MetaException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return rawStore.getTable(catName, dbName, tblName, writeIdList);
+      return rawStore.getTable(catName, dbName, tblName, validWriteIds);
     }
     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (tbl == null || writeIdList != null) {
+    if (tbl == null) {
       // This table is not yet loaded in cache
       // If the prewarm thread is working on this table's database,
       // let's move this table to the top of tblNamesBeingPrewarmed stack,
       // so that it gets loaded to the cache faster and is available for subsequent requests
       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
-      return rawStore.getTable(catName, dbName, tblName, writeIdList);
+      return rawStore.getTable(catName, dbName, tblName, validWriteIds);
     }
-    if (tbl != null) {
-      tbl.unsetPrivileges();
-      tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+    if (validWriteIds != null) {
+      tbl.setParameters(adjustStatsParamsForGet(tbl.getParameters(),
+          tbl.getParameters(), tbl.getWriteId(), validWriteIds));
     }
+
+    tbl.unsetPrivileges();
+    tbl.setRewriteEnabled(tbl.isRewriteEnabled());
     return tbl;
   }
 
@@ -913,24 +937,34 @@ public class CachedStore implements RawStore, Configurable {
     return getPartition(catName, dbName, tblName, part_vals, null);
   }
 
-  // TODO: the same as getTable()
   @Override
   public Partition getPartition(String catName, String dbName, String tblName,
-                                List<String> part_vals, String writeIdList)
+                                List<String> part_vals, String validWriteIds)
       throws MetaException, NoSuchObjectException {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
       return rawStore.getPartition(
-          catName, dbName, tblName, part_vals, writeIdList);
+          catName, dbName, tblName, part_vals, validWriteIds);
     }
     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
-    if (part == null || writeIdList != null) {
+    if (part == null) {
       // The table containing the partition is not yet loaded in cache
       return rawStore.getPartition(
-          catName, dbName, tblName, part_vals, writeIdList);
+          catName, dbName, tblName, part_vals, validWriteIds);
+    }
+    if (validWriteIds != null) {
+      Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+      if (table == null) {
+        // The table containing the partition is not yet loaded in cache
+        return rawStore.getPartition(
+            catName, dbName, tblName, part_vals, validWriteIds);
+      }
+      part.setParameters(adjustStatsParamsForGet(table.getParameters(),
+          part.getParameters(), part.getWriteId(), validWriteIds));
     }
+
     return part;
   }
 
@@ -1010,21 +1044,21 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public void alterTable(String catName, String dbName, String tblName, Table newTable,
+  public Table alterTable(String catName, String dbName, String tblName, Table newTable,
       String validWriteIds) throws InvalidObjectException, MetaException {
-    rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
+    newTable = rawStore.alterTable(catName, dbName, tblName, newTable, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
     String newTblName = normalizeIdentifier(newTable.getTableName());
     if (!shouldCacheTable(catName, dbName, tblName) &&
         !shouldCacheTable(catName, dbName, newTblName)) {
-      return;
+      return newTable;
     }
     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
     if (tbl == null) {
       // The table is not yet loaded in cache
-      return;
+      return newTable;
     }
     if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
       // If old table is in the cache and the new table can also be cached
@@ -1036,6 +1070,7 @@ public class CachedStore implements RawStore, Configurable {
       // If old table is in the cache but the new table *cannot* be cached
       sharedCache.removeTableFromCache(catName, dbName, tblName);
     }
+    return newTable;
   }
 
   @Override
@@ -1161,34 +1196,35 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
-                             Partition newPart, String queryValidWriteIds)
-                                 throws InvalidObjectException, MetaException {
-    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
+  public Partition alterPartition(String catName, String dbName, String tblName,
+      List<String> partVals, Partition newPart, String validWriteIds)
+          throws InvalidObjectException, MetaException {
+    newPart = rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return;
+      return newPart;
     }
     sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart);
+    return newPart;
   }
 
   @Override
-  public void alterPartitions(String catName, String dbName, String tblName,
+  public List<Partition> alterPartitions(String catName, String dbName, String tblName,
                               List<List<String>> partValsList, List<Partition> newParts,
                               long writeId, String validWriteIds)
       throws InvalidObjectException, MetaException {
-    rawStore.alterPartitions(
+    newParts = rawStore.alterPartitions(
         catName, dbName, tblName, partValsList, newParts, writeId, validWriteIds);
     catName = normalizeIdentifier(catName);
     dbName = normalizeIdentifier(dbName);
     tblName = normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
-      return;
+      return newParts;
     }
-    // TODO: modify the following method for the case when writeIdList != null.
     sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
+    return newParts;
   }
 
   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
@@ -1598,34 +1634,67 @@ public class CachedStore implements RawStore, Configurable {
     return partitions;
   }
 
+  // Note: ideally this should be above both CachedStore and ObjectStore.
+  private Map<String, String> adjustStatsParamsForGet(Map<String, String> tableParams,
+      Map<String, String> params, long statsWriteId, String validWriteIds) throws MetaException {
+    if (!TxnUtils.isTransactionalTable(tableParams)) return params; // Not a txn table.
+    if (areTxnStatsSupported && ((validWriteIds == null)
+        || ObjectStore.isCurrentStatsValidForTheQuery(
+            conf, params, statsWriteId, validWriteIds, false))) {
+      // Valid stats are supported for txn tables, and either no verification was requested by the
+      // caller, or the verification has succeeded.
+      return params;
+    }
+    // Clone the map to avoid affecting the cached value.
+    params = new HashMap<>(params);
+    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
+    return params;
+  }
+
+
+  // Note: ideally this should be above both CachedStore and ObjectStore.
+  private ColumnStatistics adjustColStatForGet(Map<String, String> tableParams,
+      Map<String, String> params, ColumnStatistics colStat, long statsWriteId,
+      String validWriteIds) throws MetaException {
+    colStat.setIsStatsCompliant(true);
+    if (!TxnUtils.isTransactionalTable(tableParams)) return colStat; // Not a txn table.
+    if (areTxnStatsSupported && ((validWriteIds == null)
+        || ObjectStore.isCurrentStatsValidForTheQuery(
+            conf, params, statsWriteId, validWriteIds, false))) {
+      // Valid stats are supported for txn tables, and either no verification was requested by the
+      // caller, or the verification has succeeded.
+      return colStat;
+    }
+    // Don't clone; ColStats objects are not cached, only their parts.
+    colStat.setIsStatsCompliant(false);
+    return colStat;
+  }
+
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics colStats, String validWriteIds, long writeId)
+  public Map<String, String> updateTableColumnStatistics(ColumnStatistics colStats,
+      String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updateTableColumnStatistics(colStats, validWriteIds, writeId);
-    if (succ) {
+    Map<String, String> newParams = rawStore.updateTableColumnStatistics(
+        colStats, validWriteIds, writeId);
+    if (newParams != null) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
           getDefaultCatalog(conf);
       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
       if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
+        return newParams;
       }
       Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
       if (table == null) {
         // The table is not yet loaded in cache
-        return succ;
+        return newParams;
       }
-      List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
-      List<String> colNames = new ArrayList<>();
-      for (ColumnStatisticsObj statsObj : statsObjs) {
-        colNames.add(statsObj.getColName());
-      }
-      StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
+      table.setParameters(newParams);
       sharedCache.alterTableInCache(catName, dbName, tblName, table);
-      sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs);
+      sharedCache.updateTableColStatsInCache(catName, dbName, tblName, colStats.getStatsObj());
     }
-    return succ;
+    return newParams;
   }
 
   @Override
@@ -1634,29 +1703,29 @@ public class CachedStore implements RawStore, Configurable {
     return getTableColumnStatistics(catName, dbName, tblName, colNames, null);
   }
 
-  // TODO: the same as getTable()
   @Override
   public ColumnStatistics getTableColumnStatistics(
       String catName, String dbName, String tblName, List<String> colNames,
-      String writeIdList)
+      String validWriteIds)
       throws MetaException, NoSuchObjectException {
     catName = StringUtils.normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
     if (!shouldCacheTable(catName, dbName, tblName)) {
       return rawStore.getTableColumnStatistics(
-          catName, dbName, tblName, colNames, writeIdList);
+          catName, dbName, tblName, colNames, validWriteIds);
     }
     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (table == null || writeIdList != null) {
+    if (table == null) {
       // The table is not yet loaded in cache
       return rawStore.getTableColumnStatistics(
-          catName, dbName, tblName, colNames, writeIdList);
+          catName, dbName, tblName, colNames, validWriteIds);
     }
     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
     List<ColumnStatisticsObj> colStatObjs =
         sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames);
-    return new ColumnStatistics(csd, colStatObjs);
+    return adjustColStatForGet(table.getParameters(), table.getParameters(),
+        new ColumnStatistics(csd, colStatObjs), table.getWriteId(), validWriteIds);
   }
 
   @Override
@@ -1677,36 +1746,31 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals,
-      String validWriteIds, long writeId)
+  public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics colStats,
+      List<String> partVals, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
-    boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals, validWriteIds, writeId);
-    if (succ) {
+    Map<String, String> newParams = rawStore.updatePartitionColumnStatistics(
+        colStats, partVals, validWriteIds, writeId);
+    if (newParams != null) {
       String catName = colStats.getStatsDesc().isSetCatName() ?
           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
       if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
+        return newParams;
       }
-      List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
       Partition part = getPartition(catName, dbName, tblName, partVals);
-      List<String> colNames = new ArrayList<>();
-      for (ColumnStatisticsObj statsObj : statsObjs) {
-        colNames.add(statsObj.getColName());
-      }
-      StatsSetupConst.setColumnStatsState(part.getParameters(), colNames);
+      part.setParameters(newParams);
       sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part);
       sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj());
     }
-    return succ;
+    return newParams;
   }
 
   @Override
-  // TODO: calculate from cached values.
   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
-    return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+    return getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames, null);
   }
 
   @Override
@@ -1714,6 +1778,8 @@ public class CachedStore implements RawStore, Configurable {
       String catName, String dbName, String tblName, List<String> partNames,
       List<String> colNames, String writeIdList)
       throws MetaException, NoSuchObjectException {
+    // TODO: why have updatePartitionColumnStatistics cache if this is a bypass?
+    // Note: when implemented, this needs to call adjustColStatForGet, like other get methods.
     return rawStore.getPartitionColumnStatistics(
         catName, dbName, tblName, partNames, colNames, writeIdList);
   }
@@ -1743,7 +1809,6 @@ public class CachedStore implements RawStore, Configurable {
   }
 
   @Override
-  // TODO: the same as getTable() for transactional stats.
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
                                       List<String> partNames, List<String> colNames,
                                       String writeIdList)
@@ -1752,16 +1817,19 @@ public class CachedStore implements RawStore, Configurable {
     catName = normalizeIdentifier(catName);
     dbName = StringUtils.normalizeIdentifier(dbName);
     tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName)) {
+    // TODO: we currently cannot do transactional checks for stats here
+    //       (incl. due to lack of sync w.r.t. the below rawStore call).
+    if (!shouldCacheTable(catName, dbName, tblName) || writeIdList != null) {
       rawStore.get_aggr_stats_for(
           catName, dbName, tblName, partNames, colNames, writeIdList);
     }
     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (table == null || writeIdList != null) {
+    if (table == null) {
       // The table is not yet loaded in cache
       return rawStore.get_aggr_stats_for(
           catName, dbName, tblName, partNames, colNames, writeIdList);
     }
+
     List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
     if (partNames.size() == allPartNames.size()) {
       colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL);
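
The adjustStatsParamsForGet method added above decides, per read, whether the cached stats are valid for the caller's write-ID snapshot and, if not, serves a copy of the parameters with the basic-stats state downgraded so the shared cached entry is never mutated. A minimal sketch of that copy-on-invalid idea; the validity checks are stubbed out as booleans, and everything other than the key name quoted from the diff's log messages is illustrative:

  import java.util.HashMap;
  import java.util.Map;

  // Sketch of the "clone and downgrade on snapshot mismatch" idea from adjustStatsParamsForGet.
  class StatsAdjustSketch {
    // Key name taken from the log messages in the diff; used here only for illustration.
    static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";

    static Map<String, String> adjustForGet(Map<String, String> cachedParams,
        boolean isTransactional, boolean txnStatsSupported, boolean statsValidForSnapshot) {
      if (!isTransactional) {
        return cachedParams;                 // non-txn tables are served as-is
      }
      if (txnStatsSupported && statsValidForSnapshot) {
        return cachedParams;                 // snapshot check passed, the cached value is usable
      }
      // Copy before downgrading so the shared cached map is never modified.
      Map<String, String> copy = new HashMap<>(cachedParams);
      // Stands in for StatsSetupConst.setBasicStatsState(copy, FALSE), which records
      // that basic stats can no longer be treated as accurate for this reader.
      copy.remove(COLUMN_STATS_ACCURATE);
      return copy;
    }
  }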

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
index 15b1aa1..24f940c 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/SharedCache.java
@@ -468,6 +468,16 @@ public class SharedCache {
       }
     }
 
+    public void removeAllTableColStats() {
+      try {
+        tableLock.writeLock().lock();
+        tableColStatsCache.clear();
+        isTableColStatsCacheDirty.set(true);
+      } finally {
+        tableLock.writeLock().unlock();
+      }
+    }
+
     public ColumnStatisticsObj getPartitionColStats(List<String> partVal, String colName) {
       try {
         tableLock.readLock().lock();
@@ -539,6 +549,20 @@ public class SharedCache {
       }
     }
 
+    public void removeAllPartitionColStats() {
+      try {
+        tableLock.writeLock().lock();
+        partitionColStatsCache.clear();
+        isPartitionColStatsCacheDirty.set(true);
+        // Invalidate cached aggregate stats
+        if (!aggrColStatsCache.isEmpty()) {
+          aggrColStatsCache.clear();
+        }
+      } finally {
+        tableLock.writeLock().unlock();
+      }
+    }
+
     public void refreshPartitionColStats(List<ColumnStatistics> partitionColStats) {
       Map<String, ColumnStatisticsObj> newPartitionColStatsCache =
           new HashMap<String, ColumnStatisticsObj>();
@@ -1292,6 +1316,18 @@ public class SharedCache {
     }
   }
 
+  public void removeAllTableColStatsFromCache(String catName, String dbName, String tblName) {
+    try {
+      cacheLock.readLock().lock();
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
+      if (tblWrapper != null) {
+        tblWrapper.removeAllTableColStats();
+      }
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+  }
+
   public void updateTableColStatsInCache(String catName, String dbName, String tableName,
       List<ColumnStatisticsObj> colStatsForTable) {
     try {
@@ -1505,6 +1541,18 @@ public class SharedCache {
     }
   }
 
+  public void removeAllPartitionColStatsFromCache(String catName, String dbName, String tblName) {
+    try {
+      cacheLock.readLock().lock();
+      TableWrapper tblWrapper = tableCache.get(CacheUtils.buildTableKey(catName, dbName, tblName));
+      if (tblWrapper != null) {
+        tblWrapper.removeAllPartitionColStats();
+      }
+    } finally {
+      cacheLock.readLock().unlock();
+    }
+  }
+
   public void updatePartitionColStatsInCache(String catName, String dbName, String tableName,
       List<String> partVals, List<ColumnStatisticsObj> colStatsObjs) {
     try {
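
The SharedCache additions above follow the cache's two-level locking convention: the outer cache is only read-locked to locate the per-table wrapper, and the wrapper takes its own write lock before clearing its stats maps and marking them dirty. A small sketch of that pattern (TableWrapperSketch, SharedCacheSketch, and the field names are illustrative stand-ins, not the Hive classes):

  import java.util.HashMap;
  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;
  import java.util.concurrent.atomic.AtomicBoolean;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  // Two-level locking: read-lock the outer cache to find the wrapper, write-lock the wrapper to mutate it.
  class SharedCacheSketch {
    static class TableWrapperSketch {
      private final ReentrantReadWriteLock tableLock = new ReentrantReadWriteLock();
      private final Map<String, Object> colStatsCache = new HashMap<>();
      private final AtomicBoolean colStatsCacheDirty = new AtomicBoolean(false);

      void removeAllColStats() {
        tableLock.writeLock().lock();
        try {
          colStatsCache.clear();
          colStatsCacheDirty.set(true);  // flag so a concurrent refresh knows its snapshot is stale
        } finally {
          tableLock.writeLock().unlock();
        }
      }
    }

    private final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock();
    private final Map<String, TableWrapperSketch> tableCache = new ConcurrentHashMap<>();

    void removeAllColStatsFromCache(String tableKey) {
      cacheLock.readLock().lock();       // only reading the outer map; the wrapper guards its own state
      try {
        TableWrapperSketch wrapper = tableCache.get(tableKey);
        if (wrapper != null) {
          wrapper.removeAllColStats();
        }
      } finally {
        cacheLock.readLock().unlock();
      }
    }
  }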

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index fb14536..09c2509 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -292,10 +292,10 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public void alterTable(String catName, String dbName, String name, Table newTable,
+  public Table alterTable(String catName, String dbName, String name, Table newTable,
       String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
-    objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
+    return objectStore.alterTable(catName, dbName, name, newTable, queryValidWriteIds);
   }
 
   @Override
@@ -357,16 +357,16 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
+  public Partition alterPartition(String catName, String dbName, String tblName, List<String> partVals,
       Partition newPart, String queryValidWriteIds) throws InvalidObjectException, MetaException {
-    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
+    return objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryValidWriteIds);
   }
 
   @Override
-  public void alterPartitions(String catName, String dbName, String tblName,
+  public List<Partition> alterPartitions(String catName, String dbName, String tblName,
       List<List<String>> partValsList, List<Partition> newParts,
       long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
-    objectStore.alterPartitions(
+    return objectStore.alterPartitions(
         catName, dbName, tblName, partValsList, newParts, writeId, queryValidWriteIds);
   }
 
@@ -694,14 +694,14 @@ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
+  public Map<String, String> updateTableColumnStatistics(ColumnStatistics statsObj, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {
     return objectStore.updateTableColumnStatistics(statsObj, validWriteIds, writeId);
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+  public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,
       List<String> partVals, String validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException,
       InvalidInputException {

http://git-wip-us.apache.org/repos/asf/hive/blob/64d75a43/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 52785a6..3aebaf3 100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -290,8 +290,9 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public void alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds)
+  public Table alterTable(String catName, String dbname, String name, Table newTable, String queryValidWriteIds)
       throws InvalidObjectException, MetaException {
+    return newTable;
   }
 
   @Override
@@ -358,14 +359,16 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
+  public Partition alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
       Partition new_part, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+    return new_part;
   }
 
   @Override
-  public void alterPartitions(String catName, String db_name, String tbl_name,
+  public List<Partition> alterPartitions(String catName, String db_name, String tbl_name,
       List<List<String>> part_vals_list, List<Partition> new_parts,
       long writeId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+    return new_parts;
   }
 
   @Override
@@ -743,17 +746,17 @@ public class DummyRawStoreForJdoConnection implements RawStore {
   }
 
   @Override
-  public boolean updateTableColumnStatistics(ColumnStatistics statsObj,
+  public Map<String, String> updateTableColumnStatistics(ColumnStatistics statsObj,
       String  validWriteIds, long writeId)
       throws NoSuchObjectException, MetaException, InvalidObjectException {
-    return false;
+    return null;
   }
 
   @Override
-  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals,
+  public Map<String, String> updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals,
       String  validWriteIds, long writeId)
     throws NoSuchObjectException, MetaException, InvalidObjectException {
-    return false;
+    return null;
   }
 
   @Override


[49/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0724

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0724


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5e7a8b59
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5e7a8b59
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5e7a8b59

Branch: refs/heads/master
Commit: 5e7a8b59cae36ccdd70c6540cf6ab3d2dfe1e735
Parents: 7bd688b 26f4d8e
Author: sergey <se...@apache.org>
Authored: Tue Jul 24 12:40:08 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jul 24 12:40:08 2018 -0700

----------------------------------------------------------------------
 .../predicate/TestAccumuloRangeGenerator.java   |   7 +-
 .../test/resources/testconfiguration.properties |   1 +
 .../DTIColumnArithmeticDTIColumnNoConvert.txt   |   1 -
 .../DTIScalarArithmeticDTIColumnNoConvert.txt   |   1 -
 .../org/apache/hadoop/hive/ql/ErrorMsg.java     |   3 +
 .../hadoop/hive/ql/exec/FunctionRegistry.java   |   7 +-
 .../apache/hadoop/hive/ql/exec/MoveTask.java    |   2 +-
 .../spark/status/impl/RemoteSparkJobStatus.java | 108 +++-
 .../ql/exec/vector/VectorizationContext.java    |  26 +-
 .../vector/expressions/CastDecimalToString.java |   2 +-
 .../hive/ql/index/IndexPredicateAnalyzer.java   |   2 +
 .../apache/hadoop/hive/ql/metadata/Hive.java    |  22 +-
 .../calcite/translator/RexNodeConverter.java    |   4 +-
 .../hive/ql/optimizer/physical/Vectorizer.java  |   3 +-
 .../hive/ql/parse/DDLSemanticAnalyzer.java      |   1 +
 .../hadoop/hive/ql/parse/TezCompiler.java       |  42 ++
 .../hadoop/hive/ql/plan/LoadTableDesc.java      |  10 +-
 .../apache/hadoop/hive/ql/plan/TableDesc.java   |   3 +
 .../apache/hadoop/hive/ql/udf/UDFToString.java  | 181 ------
 .../hive/ql/udf/generic/GenericUDFToString.java |  79 +++
 .../vector/expressions/TestVectorTypeCasts.java |   6 +-
 .../queries/clientpositive/external_insert.q    |  14 +
 .../clientpositive/murmur_hash_migration.q      |  61 ++
 .../clientpositive/autoColumnStats_6.q.out      |   2 +-
 .../clientpositive/bucket_map_join_spark1.q.out |   4 +-
 .../clientpositive/bucket_map_join_spark2.q.out |   4 +-
 .../clientpositive/bucket_map_join_spark3.q.out |   4 +-
 .../results/clientpositive/bucketmapjoin5.q.out |   4 +-
 .../clientpositive/bucketmapjoin_negative.q.out |   2 +-
 .../bucketmapjoin_negative2.q.out               |   2 +-
 .../bucketsortoptimize_insert_3.q.out           |   2 +-
 .../clientpositive/char_pad_convert.q.out       |   4 +-
 .../column_pruner_multiple_children.q.out       |   2 +-
 .../test/results/clientpositive/decimal_2.q.out |   4 +-
 .../clientpositive/external_insert.q.out        | 158 +++++
 .../test/results/clientpositive/groupby12.q.out |   2 +-
 .../test/results/clientpositive/groupby5.q.out  |   2 +-
 .../clientpositive/groupby5_noskew.q.out        |   2 +-
 .../results/clientpositive/groupby7_map.q.out   |   4 +-
 .../groupby7_map_multi_single_reducer.q.out     |   4 +-
 .../clientpositive/groupby7_map_skew.q.out      |   4 +-
 .../clientpositive/groupby7_noskew.q.out        |   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../test/results/clientpositive/groupby8.q.out  |   8 +-
 .../results/clientpositive/groupby8_map.q.out   |   4 +-
 .../clientpositive/groupby8_map_skew.q.out      |   4 +-
 .../clientpositive/groupby8_noskew.q.out        |   4 +-
 .../test/results/clientpositive/groupby9.q.out  |  20 +-
 .../clientpositive/groupby_cube_multi_gby.q.out |   2 +-
 .../clientpositive/groupby_position.q.out       |   8 +-
 .../clientpositive/groupby_sort_1_23.q.out      |   2 +-
 .../clientpositive/groupby_sort_skew_1_23.q.out |   4 +-
 .../infer_bucket_sort_dyn_part.q.out            |   2 +-
 .../infer_bucket_sort_grouping_operators.q.out  |   6 +-
 .../infer_bucket_sort_map_operators.q.out       |   6 +-
 .../infer_bucket_sort_num_buckets.q.out         |   2 +-
 .../results/clientpositive/input_part10.q.out   |   2 +-
 .../clientpositive/llap/bucketmapjoin1.q.out    |   4 +-
 .../clientpositive/llap/bucketmapjoin2.q.out    |   6 +-
 .../clientpositive/llap/bucketmapjoin3.q.out    |   4 +-
 .../clientpositive/llap/bucketmapjoin4.q.out    |   4 +-
 .../llap/default_constraint.q.out               |   4 +-
 .../llap/dynamic_partition_pruning.q.out        |  16 +-
 .../llap/insert_into_default_keyword.q.out      |  20 +-
 .../llap/materialized_view_rewrite_6.q.out      |   4 +-
 .../llap/multi_insert_lateral_view.q.out        |  32 +-
 .../llap/murmur_hash_migration.q.out            | 618 +++++++++++++++++++
 .../clientpositive/llap/orc_merge1.q.out        |   6 +-
 .../clientpositive/llap/orc_merge10.q.out       |   6 +-
 .../clientpositive/llap/orc_merge2.q.out        |   2 +-
 .../clientpositive/llap/orc_merge_diff_fs.q.out |   6 +-
 .../clientpositive/llap/rcfile_merge2.q.out     |   2 +-
 .../llap/schema_evol_text_nonvec_part.q.out     |   4 +-
 ...hema_evol_text_nonvec_part_all_complex.q.out |  16 +-
 ...l_text_nonvec_part_all_complex_llap_io.q.out |  16 +-
 .../schema_evol_text_nonvec_part_llap_io.q.out  |   4 +-
 .../llap/schema_evol_text_vec_part.q.out        |   4 +-
 .../schema_evol_text_vec_part_all_complex.q.out |  16 +-
 .../llap/schema_evol_text_vecrow_part.q.out     |   4 +-
 ...hema_evol_text_vecrow_part_all_complex.q.out |  16 +-
 .../results/clientpositive/llap/stats11.q.out   |   4 +-
 .../clientpositive/llap/subquery_multi.q.out    |   6 +-
 .../llap/tez_union_multiinsert.q.out            |  30 +-
 .../results/clientpositive/llap/union6.q.out    |   2 +-
 .../clientpositive/llap/unionDistinct_1.q.out   |  18 +-
 .../clientpositive/llap/unionDistinct_3.q.out   |  10 +-
 .../llap/vector_case_when_1.q.out               |  10 +-
 .../llap/vector_char_mapjoin1.q.out             |   6 +-
 .../clientpositive/llap/vector_decimal_1.q.out  |   2 +-
 .../clientpositive/llap/vector_decimal_2.q.out  |   8 +-
 .../llap/vector_decimal_expressions.q.out       |  28 +-
 .../llap/vector_string_concat.q.out             |   2 +-
 .../clientpositive/llap/vector_udf1.q.out       |   6 +-
 .../llap/vector_varchar_mapjoin1.q.out          |   6 +-
 .../clientpositive/llap/vectorized_casts.q.out  |   2 +-
 .../vectorized_dynamic_partition_pruning.q.out  |  16 +-
 .../clientpositive/multi_insert_mixed.q.out     |   4 +-
 .../results/clientpositive/orc_merge1.q.out     |   6 +-
 .../results/clientpositive/orc_merge10.q.out    |   6 +-
 .../results/clientpositive/orc_merge2.q.out     |   2 +-
 .../clientpositive/orc_merge_diff_fs.q.out      |   6 +-
 .../clientpositive/perf/spark/query36.q.out     |   6 +-
 .../clientpositive/perf/spark/query70.q.out     |   6 +-
 .../clientpositive/perf/spark/query86.q.out     |   6 +-
 .../clientpositive/perf/tez/query36.q.out       |   4 +-
 .../clientpositive/perf/tez/query70.q.out       |   4 +-
 .../clientpositive/perf/tez/query86.q.out       |   4 +-
 .../results/clientpositive/show_functions.q.out |   1 -
 .../results/clientpositive/smb_mapjoin_20.q.out |  12 +-
 .../spark/bucket_map_join_spark1.q.out          |   4 +-
 .../spark/bucket_map_join_spark2.q.out          |   4 +-
 .../spark/bucket_map_join_spark3.q.out          |   4 +-
 .../clientpositive/spark/bucketmapjoin1.q.out   |   4 +-
 .../clientpositive/spark/bucketmapjoin2.q.out   |   6 +-
 .../clientpositive/spark/bucketmapjoin3.q.out   |   4 +-
 .../clientpositive/spark/bucketmapjoin4.q.out   |   4 +-
 .../clientpositive/spark/bucketmapjoin5.q.out   |   4 +-
 .../spark/bucketmapjoin_negative.q.out          |   2 +-
 .../spark/bucketmapjoin_negative2.q.out         |   2 +-
 .../spark/dynamic_rdd_cache.q.out               |   8 +-
 .../results/clientpositive/spark/groupby5.q.out |   2 +-
 .../clientpositive/spark/groupby5_noskew.q.out  |   2 +-
 .../clientpositive/spark/groupby7_map.q.out     |   4 +-
 .../groupby7_map_multi_single_reducer.q.out     |   4 +-
 .../spark/groupby7_map_skew.q.out               |   4 +-
 .../clientpositive/spark/groupby7_noskew.q.out  |   4 +-
 .../groupby7_noskew_multi_single_reducer.q.out  |   4 +-
 .../results/clientpositive/spark/groupby8.q.out |   8 +-
 .../clientpositive/spark/groupby8_map.q.out     |   4 +-
 .../spark/groupby8_map_skew.q.out               |   4 +-
 .../clientpositive/spark/groupby8_noskew.q.out  |   4 +-
 .../results/clientpositive/spark/groupby9.q.out |  20 +-
 .../clientpositive/spark/groupby_position.q.out |   8 +-
 .../spark/groupby_sort_1_23.q.out               |   2 +-
 .../spark/groupby_sort_skew_1_23.q.out          |   4 +-
 .../spark/infer_bucket_sort_map_operators.q.out |   6 +-
 .../spark/multi_insert_lateral_view.q.out       |  32 +-
 .../spark/multi_insert_mixed.q.out              |   4 +-
 .../clientpositive/spark/smb_mapjoin_20.q.out   |  12 +-
 .../spark/spark_dynamic_partition_pruning.q.out |  44 +-
 ...k_vectorized_dynamic_partition_pruning.q.out |  44 +-
 .../results/clientpositive/spark/stats1.q.out   |   2 +-
 .../clientpositive/spark/subquery_multi.q.out   |   6 +-
 .../results/clientpositive/spark/union17.q.out  |   8 +-
 .../results/clientpositive/spark/union18.q.out  |   2 +-
 .../results/clientpositive/spark/union19.q.out  |   4 +-
 .../results/clientpositive/spark/union20.q.out  |   4 +-
 .../results/clientpositive/spark/union32.q.out  |   4 +-
 .../results/clientpositive/spark/union33.q.out  |   4 +-
 .../results/clientpositive/spark/union6.q.out   |   2 +-
 .../clientpositive/spark/union_remove_19.q.out  |   4 +-
 .../spark/vector_string_concat.q.out            |   2 +-
 ql/src/test/results/clientpositive/stats1.q.out |   2 +-
 .../results/clientpositive/tablevalues.q.out    |   2 +-
 ql/src/test/results/clientpositive/udf3.q.out   |   2 +-
 .../results/clientpositive/udf_string.q.out     |   9 +-
 .../test/results/clientpositive/union17.q.out   |   6 +-
 .../test/results/clientpositive/union18.q.out   |   2 +-
 .../test/results/clientpositive/union19.q.out   |   4 +-
 .../test/results/clientpositive/union20.q.out   |   4 +-
 .../test/results/clientpositive/union32.q.out   |   4 +-
 .../test/results/clientpositive/union33.q.out   |   4 +-
 ql/src/test/results/clientpositive/union6.q.out |   2 +-
 .../clientpositive/union_remove_19.q.out        |   4 +-
 .../clientpositive/vector_case_when_1.q.out     |  10 +-
 .../clientpositive/vector_char_mapjoin1.q.out   |   4 +-
 .../clientpositive/vector_decimal_1.q.out       |   2 +-
 .../vector_decimal_expressions.q.out            |  28 +-
 .../clientpositive/vector_string_concat.q.out   |   2 +-
 .../vector_varchar_mapjoin1.q.out               |   4 +-
 .../clientpositive/vectorized_casts.q.out       |   2 +-
 .../PrimitiveObjectInspectorConverter.java      |   3 +-
 .../PrimitiveObjectInspectorUtils.java          |   3 +-
 .../TestObjectInspectorConverters.java          |  46 +-
 .../TestPrimitiveObjectInspectorUtils.java      |  14 +
 175 files changed, 1649 insertions(+), 737 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/5e7a8b59/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/5e7a8b59/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/5e7a8b59/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/5e7a8b59/ql/src/test/results/clientpositive/llap/insert_into_default_keyword.q.out
----------------------------------------------------------------------


[38/50] [abbrv] hive git commit: HIVE-20046 : remove NUM_FILES check (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-20046 : remove NUM_FILES check (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ddef8952
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ddef8952
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ddef8952

Branch: refs/heads/master
Commit: ddef89525eaef9f5eeea4029475a24b891a41df7
Parents: 31ee870
Author: sergey <se...@apache.org>
Authored: Sat Jul 21 12:32:43 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sat Jul 21 12:32:43 2018 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hive/metastore/ObjectStore.java    | 7 -------
 1 file changed, 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/ddef8952/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index b319e68..9c46b68 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -12473,13 +12473,6 @@ public class ObjectStore implements RawStore, Configurable {
       return false;
     }
 
-    // TODO## NUM_FILES could also be set to 0 by invalid update. We need to have a negative test. Or remove this and fix stuff.
-    // If the NUM_FILES of the table/partition is 0, return 'true' from this method.
-    // Since newly initialized empty table has 0 for the parameter.
-    if (Long.parseLong(statsParams.get(StatsSetupConst.NUM_FILES)) == 0) {
-      return true;
-    }
-
     if (queryValidWriteIdList != null) { // Can be null when stats are being reset to invalid.
       ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIdList);
       // Just check if the write ID is valid. If it's valid (i.e. we are allowed to see it),
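
For readers following this change: after the removal, the check above no longer short-circuits on NUM_FILES == 0 and falls through to the write-ID validation kept in the retained lines. A minimal illustrative sketch of that remaining shape, assuming a non-null valid write-ID list string (the class and method names below are hypothetical, not the actual ObjectStore code):

import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
import org.apache.hadoop.hive.common.ValidWriteIdList;

// Hypothetical helper, for illustration only.
public class StatsVisibilitySketch {
  /**
   * @param statsWriteId        write ID recorded when the stats were last written
   * @param queryValidWriteIds  string form of the query's valid write-ID list
   *                            (assumed non-null here; the null "stats being reset"
   *                            case shown in the diff is omitted)
   */
  public static boolean statsVisibleToQuery(long statsWriteId, String queryValidWriteIds) {
    ValidWriteIdList list4TheQuery = new ValidReaderWriteIdList(queryValidWriteIds);
    // The stored stats are usable only if the write ID that produced them is
    // valid for (visible to) the querying transaction.
    return list4TheQuery.isWriteIdValid(statsWriteId);
  }
}

The real method also consults the table/partition stats parameters and handles edge cases that this sketch leaves out.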


[48/50] [abbrv] hive git commit: HIVE-19532 : fix tests for master-txnstats branch - fix one more out (Sergey Shelukhin)

Posted by se...@apache.org.
HIVE-19532 : fix tests for master-txnstats branch - fix one more out  (Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7bd688b2
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7bd688b2
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7bd688b2

Branch: refs/heads/master
Commit: 7bd688b27a6988c9a242015eeb4450784c164049
Parents: 9f9ae73
Author: sergey <se...@apache.org>
Authored: Tue Jul 24 12:39:25 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Tue Jul 24 12:39:25 2018 -0700

----------------------------------------------------------------------
 .../test/queries/clientpositive/stats_part2.q   | 12 +++------
 .../results/clientpositive/stats_part2.q.out    | 28 ++++++++++----------
 2 files changed, 17 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7bd688b2/ql/src/test/queries/clientpositive/stats_part2.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/stats_part2.q b/ql/src/test/queries/clientpositive/stats_part2.q
index 24be218..068e928 100644
--- a/ql/src/test/queries/clientpositive/stats_part2.q
+++ b/ql/src/test/queries/clientpositive/stats_part2.q
@@ -15,6 +15,8 @@ set hive.support.concurrency=true;
 set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 set hive.query.results.cache.enabled=false;
 
+set metastore.aggregate.stats.cache.enabled=false;
+
 -- create source.
 drop table if exists mysource;
 create table mysource (p int, key int, value string);
@@ -22,22 +24,14 @@ insert into mysource values (100,20,'value20'), (101,40,'string40'), (102,50,'st
 insert into mysource values (100,21,'value21'), (101,41,'value41'), (102,51,'value51');
 
 -- test partitioned table
-drop table if exists stats_partitioned;
+drop table if exists stats_part;
 
---create table stats_part(key int,value string) partitioned by (p int) stored as orc;
 create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true");
---create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true", "transactional_properties"="insert_only");
 
---explain select count(*) from stats_part;
---select count(*) from stats_part;
---explain select count(*) from stats_part where p = 100;
---select count(*) from stats_part where p = 100;
 explain select count(*) from stats_part where p > 100;
 explain select max(key) from stats_part where p > 100;
---select count(*) from stats_part where p > 100;
 desc formatted stats_part;
 
---explain insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
 insert into table stats_part partition(p=100) select distinct key, value from mysource where p == 100;
 insert into table stats_part partition(p=101) select distinct key, value from mysource where p == 101;
 insert into table stats_part partition(p=102) select distinct key, value from mysource where p == 102;

http://git-wip-us.apache.org/repos/asf/hive/blob/7bd688b2/ql/src/test/results/clientpositive/stats_part2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/stats_part2.q.out b/ql/src/test/results/clientpositive/stats_part2.q.out
index 9c22ce7..dfdf7b3 100644
--- a/ql/src/test/results/clientpositive/stats_part2.q.out
+++ b/ql/src/test/results/clientpositive/stats_part2.q.out
@@ -32,9 +32,9 @@ POSTHOOK: Output: default@mysource
 POSTHOOK: Lineage: mysource.key SCRIPT []
 POSTHOOK: Lineage: mysource.p SCRIPT []
 POSTHOOK: Lineage: mysource.value SCRIPT []
-PREHOOK: query: drop table if exists stats_partitioned
+PREHOOK: query: drop table if exists stats_part
 PREHOOK: type: DROPTABLE
-POSTHOOK: query: drop table if exists stats_partitioned
+POSTHOOK: query: drop table if exists stats_part
 POSTHOOK: type: DROPTABLE
 PREHOOK: query: create table stats_part(key int,value string) partitioned by (p int) stored as orc tblproperties ("transactional"="true")
 PREHOOK: type: CREATETABLE
@@ -594,19 +594,19 @@ STAGE PLANS:
           TableScan
             alias: stats_part
             filterExpr: (p > 100) (type: boolean)
-            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: int)
               outputColumnNames: key
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -614,10 +614,10 @@ STAGE PLANS:
           aggregations: max(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -1185,19 +1185,19 @@ STAGE PLANS:
           TableScan
             alias: stats_part
             filterExpr: (p > 100) (type: boolean)
-            Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
             Select Operator
               expressions: key (type: int)
               outputColumnNames: key
-              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: PARTIAL
+              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
               Group By Operator
                 aggregations: max(key)
                 mode: hash
                 outputColumnNames: _col0
-                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+                Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                 Reduce Output Operator
                   sort order: 
-                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+                  Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
                   value expressions: _col0 (type: int)
       Execution mode: vectorized
       Reduce Operator Tree:
@@ -1205,10 +1205,10 @@ STAGE PLANS:
           aggregations: max(VALUE._col0)
           mode: mergepartial
           outputColumnNames: _col0
-          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+          Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
           File Output Operator
             compressed: false
-            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: PARTIAL
+            Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE
             table:
                 input format: org.apache.hadoop.mapred.SequenceFileInputFormat
                 output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat


[42/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0721-2

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0721-2


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2cbe1331
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2cbe1331
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2cbe1331

Branch: refs/heads/master
Commit: 2cbe1331336222bf77743d75ddd419168a03200d
Parents: 64d75a4 cce3a05
Author: sergey <se...@apache.org>
Authored: Sat Jul 21 13:46:10 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Sat Jul 21 13:46:10 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hive/llap/LlapCacheAwareFs.java   | 12 +++++++++---
 .../org/apache/hadoop/hive/ql/io/HiveInputFormat.java   |  7 ++++---
 2 files changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------



[21/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 0000000,73a518d..95e8445
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@@ -1,0 -1,1682 +1,1719 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ 
+ import java.lang.annotation.ElementType;
+ import java.lang.annotation.Retention;
+ import java.lang.annotation.RetentionPolicy;
+ import java.lang.annotation.Target;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ public interface RawStore extends Configurable {
+ 
+   /***
+    * Annotation to skip retries
+    */
+   @Target(value = ElementType.METHOD)
+   @Retention(value = RetentionPolicy.RUNTIME)
+   @interface CanNotRetry {
+   }
+ 
+   void shutdown();
+ 
+   /**
+    * Opens a new transaction, or joins the one already open. Every call of this
+    * function must have a corresponding commit or rollback call.
+    *
+    * @return true if there is now an active transaction
+    */
+ 
+   boolean openTransaction();
+ 
+   /**
+    * If this is the commit matching the first (outermost) open call, then an
+    * actual commit is issued.
+    *
+    * @return true if the commit succeeded
+    */
+   @CanNotRetry
+   boolean commitTransaction();
+ 
+   boolean isActiveTransaction();
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @CanNotRetry
+   void rollbackTransaction();
+ 
+   /**
+    * Create a new catalog.
+    * @param cat Catalog to create.
+    * @throws MetaException if something goes wrong, usually in storing it to the database.
+    */
+   void createCatalog(Catalog cat) throws MetaException;
+ 
+   /**
+    * Alter an existing catalog.  Only description and location can be changed, and the change of
+    * location is for internal use only.
+    * @param catName name of the catalog to alter.
+    * @param cat new version of the catalog.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws InvalidOperationException attempt to change something about the catalog that is not
+    * changeable, like the name.
+    */
+   void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException;
+ 
+   /**
+    * Get a catalog.
+    * @param catalogName name of the catalog.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException if something goes wrong, usually in reading it from the database.
+    */
+   Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all the catalogs.
+    * @return list of names of all catalogs in the system
+    * @throws MetaException if something goes wrong, usually in reading from the database.
+    */
+   List<String> getCatalogs() throws MetaException;
+ 
+   /**
+    * Drop a catalog.  The catalog must be empty.
+    * @param catalogName name of the catalog to drop.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException could mean the catalog isn't empty, could mean general database error.
+    */
+   void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Create a database.
+    * @param db database to create.
+    * @throws InvalidObjectException not sure it actually ever throws this.
+    * @throws MetaException if something goes wrong, usually in writing it to the database.
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog the database is in.
+    * @param name name of the database.
+    * @return the database.
+    * @throws NoSuchObjectException if no such database exists.
+    */
+   Database getDatabase(String catalogName, String name)
+       throws NoSuchObjectException;
+ 
+   /**
+    * Drop a database.
+    * @param catalogName catalog the database is in.
+    * @param dbname name of the database.
+    * @return true if the database was dropped, pretty much always returns this if it returns.
+    * @throws NoSuchObjectException no database in this catalog of this name to drop
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean dropDatabase(String catalogName, String dbname)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a database.
+    * @param catalogName name of the catalog the database is in.
+    * @param dbname name of the database to alter
+    * @param db new version of the database.  This should be complete as it will fully replace the
+    *          existing db object.
+    * @return true if the change succeeds, could fail due to db constraint violations.
+    * @throws NoSuchObjectException no database of this name exists to alter.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean alterDatabase(String catalogName, String dbname, Database db)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all databases in a catalog having names that match a pattern.
+    * @param catalogName name of the catalog to search for databases in
+    * @param pattern pattern names should match
+    * @return list of matching database names.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getDatabases(String catalogName, String pattern) throws MetaException;
+ 
+   /**
+    * Get names of all the databases in a catalog.
+    * @param catalogName name of the catalog to search for databases in
+    * @return list of names of all databases in the catalog
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getAllDatabases(String catalogName) throws MetaException;
+ 
+   boolean createType(Type type);
+ 
+   Type getType(String typeName);
+ 
+   boolean dropType(String typeName);
+ 
+   void createTable(Table tbl) throws InvalidObjectException,
+       MetaException;
+ 
+   /**
+    * Drop a table.
+    * @param catalogName catalog the table is in
+    * @param dbName database the table is in
+    * @param tableName table name
+    * @return true if the table was dropped
+    * @throws MetaException something went wrong, usually in the RDBMS or storage
+    * @throws NoSuchObjectException No table of this name
+    * @throws InvalidObjectException Don't think this is ever actually thrown
+    * @throws InvalidInputException Don't think this is ever actually thrown
+    */
+   boolean dropTable(String catalogName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Get a table object.
+    * @param catalogName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object, or null if no such table exists (wow it would be nice if we either
+    * consistently returned null or consistently threw NoSuchObjectException).
+    * @throws MetaException something went wrong in the RDBMS
+    */
+   Table getTable(String catalogName, String dbName, String tableName) throws MetaException;
+ 
+   /**
++   * Get a table object.
++   * @param catalogName catalog the table is in.
++   * @param dbName database the table is in.
++   * @param tableName table name.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return table object, or null if no such table exists (wow it would be nice if we either
++   * consistently returned null or consistently threw NoSuchObjectException).
++   * @throws MetaException something went wrong in the RDBMS
++   */
++  Table getTable(String catalogName, String dbName, String tableName,
++                 long txnId, String writeIdList) throws MetaException;
++
++  /**
+    * Add a partition.
+    * @param part partition to add
+    * @return true if the partition was successfully added.
+    * @throws InvalidObjectException the provided partition object is not valid.
+    * @throws MetaException error writing to the RDBMS.
+    */
+   boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param parts list of partitions to be added.
+    * @return true if the operation succeeded.
+    * @throws InvalidObjectException never throws this AFAICT
+    * @throws MetaException the partitions don't belong to the indicated table or error writing to
+    * the RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partitionSpec specification for the partition
+    * @param ifNotExists controls whether an already existing partition is an error.  If true, then
+    *                   it is not an error if the partition exists; if false, it is.
+    * @return whether the partition was created.
+    * @throws InvalidObjectException The passed in partition spec or table specification is invalid.
+    * @throws MetaException error writing to RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName,
+                         PartitionSpecProxy partitionSpec, boolean ifNotExists)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals partition values for this table.
+    * @return the partition.
+    * @throws MetaException error reading from RDBMS.
+    * @throws NoSuchObjectException no partition matching this specification exists.
+    */
+   Partition getPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException;
++  /**
++   * Get a partition.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tableName table name.
++   * @param part_vals partition values for this table.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return the partition.
++   * @throws MetaException error reading from RDBMS.
++   * @throws NoSuchObjectException no partition matching this specification exists.
++   */
++  Partition getPartition(String catName, String dbName, String tableName,
++                         List<String> part_vals,
++                         long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Check whether a partition exists.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partKeys list of partition keys used to generate the partition name.
+    * @param part_vals list of partition values.
+    * @return true if the partition exists, false otherwise.
+    * @throws MetaException failure reading RDBMS
+    * @throws NoSuchObjectException this is never thrown.
+    */
+   boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals list of partition values.
+    * @return true if the partition was dropped.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws NoSuchObjectException no partition matching this description exists
+    * @throws InvalidObjectException error dropping the statistics for the partition
+    * @throws InvalidInputException error dropping the statistics for the partition
+    */
+   boolean dropPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException;
+ 
+   /**
+    * Get some or all partitions for a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name
+    * @param max maximum number of partitions, or -1 to get all partitions.
+    * @return list of partitions
+    * @throws MetaException error access the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    */
+   List<Partition> getPartitions(String catName, String dbName,
+       String tableName, int max) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the location for every partition of a given table. If a partition location is a child of
+    * baseLocationToNotShow, the partitionName is still returned, but its location is returned
+    * as null.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param baseLocationToNotShow Partition locations which are children of this path are omitted,
+    *     and a null value is returned instead.
+    * @param max The maximum number of partition locations returned, or -1 for all
+    * @return The map of the partitionName, location pairs
+    */
+   Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max);
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog the table is in.
+    * @param dbname database the table is in.
+    * @param name name of the table.
+    * @param newTable New table object.  Which parts of the table can be altered are
+    *                 implementation specific.
+    * @throws InvalidObjectException The new table object is invalid.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    */
 -  void alterTable(String catName, String dbname, String name, Table newTable)
++  void alterTable(String catName, String dbname, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Update creation metadata for a materialized view.
+    * @param catName catalog name.
+    * @param dbname database name.
+    * @param tablename table name.
+    * @param cm new creation metadata
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @param tableType type of table to look for
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException;
+ 
+   /**
+    * Get list of materialized views in a database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return names of all materialized views in the database
+    * @throws MetaException error querying the RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get metadata for tables matching the given database names, table names, and table types.
+    * @param catName catalog name to search in. Search must be confined to one catalog.
+    * @param dbNames databases to search in.
+    * @param tableNames names of tables to select.
+    * @param tableTypes types of tables to look for.
+    * @return list of matching table meta information.
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                List<String> tableTypes) throws MetaException;
+ 
+   /**
+    * @param catName catalog name
+    * @param dbname
+    *        The name of the database from which to retrieve the tables
+    * @param tableNames
+    *        The names of the tables to retrieve.
+    * @return A list of the tables retrievable from the database
+    *          whose names are in the list tableNames.
+    *         If there are duplicate names, only one instance of the table will be returned
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get all tables in a database.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @return list of table names
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<String> getAllTables(String catName, String dbName) throws MetaException;
+ 
+   /**
+    * Gets a list of tables based on a filter string and filter type.
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param max_tables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws MetaException
+    * @throws UnknownDBException
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                       short max_tables) throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get a partial or complete list of names for partitions of a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of partitions to retrieve, -1 for all.
+    * @return list of partition names.
+    * @throws MetaException there was an error accessing the RDBMS
+    */
+   List<String> listPartitionNames(String catName, String db_name,
+       String tbl_name, short max_parts) throws MetaException;
+ 
+   /**
+    * Get a list of partition values as one big struct.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param cols partition key columns
+    * @param applyDistinct whether to apply distinct to the list
+    * @param filter filter to apply to the partition names
+    * @param ascending whether to put in ascending order
+    * @param order columns to order the result by
+    * @param maxParts maximum number of parts to return, or -1 for all
+    * @return struct with all of the partition value information
+    * @throws MetaException error access the RDBMS
+    */
+   PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+                                               List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+                                               List<FieldSchema> order, long maxParts) throws MetaException;
+ 
+   /**
+    * Alter a partition.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values that describe the partition.
+    * @param new_part new partition object.  This should be a complete copy of the old one with
+    *                 changed values, not just the parts to update.
+    * @throws InvalidObjectException No such partition.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException;
++      Partition new_part, long queryTxnId, String queryValidWriteIds)
++          throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Alter a set of partitions.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals_list list of list of partition values.  Each outer list describes one
+    *                       partition (with its list of partition values).
+    * @param new_parts list of new partitions.  The order must match the old partitions described in
+    *                  part_vals_list.  Each of these should be a complete copy of the new
+    *                  partition, not just the pieces to update.
++   * @param writeId write id of the transaction for the table
++   * @param queryTxnId transaction id of the transaction that called this method.
++   * @param queryValidWriteIds valid write id list of the calling transaction on the current table
+    * @throws InvalidObjectException One of the indicated partitions does not exist.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartitions(String catName, String db_name, String tbl_name,
 -      List<List<String>> part_vals_list, List<Partition> new_parts)
++      List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get partitions with a filter.  This is a portion of the SQL where clause.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param filter SQL where clause filter
+    * @param maxParts maximum number of partitions to return, or -1 for all.
+    * @return list of partition objects matching the criteria
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<Partition> getPartitionsByFilter(
+       String catName, String dbName, String tblName, String filter, short maxParts)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions using an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name
+    * @param tblName table name
+    * @param expr an already parsed Hive expression
+    * @param defaultPartitionName default name of a partition
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @param result list to place resulting partitions in
+    * @return true if the result contains unknown partitions.
+    * @throws TException error executing the expression
+    */
+   boolean getPartitionsByExpr(String catName, String dbName, String tblName,
+       byte[] expr, String defaultPartitionName, short maxParts, List<Partition> result)
+       throws TException;
+ 
+   /**
+    * Get the number of partitions that match a provided SQL filter.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param filter filter from Hive's SQL where clause
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or executing the filter
+    * @throws NoSuchObjectException no such table
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the number of partitions that match an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param expr an already parsed Hive expression
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or working with the expression.
+    * @throws NoSuchObjectException no such table.
+    */
+   int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names not values, so they will include
+    *                  both the key and the value.
+    * @return list of matching partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException No such table.
+    */
+   List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+                                        List<String> partNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String,String> partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean addRole(String rowName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
+ 
+   boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   boolean revokeRole(Role role, String userName, PrincipalType principalType,
+       boolean grantOption) throws MetaException, NoSuchObjectException;
+ 
+   PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a database for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated database
+    * @throws InvalidObjectException no such database
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName,
+       List<String> groupNames)  throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a table for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated table
+    * @throws InvalidObjectException no such table
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partition partition name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated partition
+    * @throws InvalidObjectException no such partition
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a column in a table or partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name, or null for table level column permissions
+    * @param columnName column name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated column in the table or partition
+    * @throws InvalidObjectException no such table, partition, or column
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName,
+       String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType);
+ 
+   /**
+    * For a given principal name and type, list the DB Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listAllTableGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partName partition name (not value)
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partName partition name (not value)
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName, String columnName);
+ 
+   boolean grantPrivileges (PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   org.apache.hadoop.hive.metastore.api.Role getRole(
+       String roleName) throws NoSuchObjectException;
+ 
+   List<String> listRoleNames();
+ 
+   List<Role> listRoles(String principalName,
+       PrincipalType principalType);
+ 
+   List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType);
+ 
+ 
+   /**
+    * Get the role-to-principal grant mapping for the given role.
+    * @param roleName name of the role
+    * @return the grants of this role to principals
+    */
+   List<RolePrincipalGrant> listRoleMembers(String roleName);
+ 
+ 
+   /**
+    * Fetch a partition along with privilege information for a particular user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partVals partition values
+    * @param user_name user to get privilege information for.
+    * @param group_names groups to get privilege information for.
+    * @return a partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException no such partition exists
+    * @throws InvalidObjectException error fetching privilege information
+    */
+   Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String user_name, List<String> group_names)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Fetch some or all partitions for a table, along with privilege information for a particular
+    * user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param maxParts maximum number of partitions to fetch, -1 for all partitions.
+    * @param userName user to get privilege information for.
+    * @param groupNames groups to get privilege information for.
+    * @return list of partitions.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    * @throws InvalidObjectException error fetching privilege information.
+    */
+   List<Partition> getPartitionsWithAuth(String catName, String dbName,
+       String tblName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Lists partition names that match a given partial specification
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys.
+    *          Entries can be empty if you only want to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @return A list of partition names that match the partial spec.
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException No such table exists
+    */
+   List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, NoSuchObjectException;
+ 
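
A minimal usage sketch of the partial-specification contract described above, assuming the usual java.util and metastore API imports; the store instance, the "hive"/"default"/"web_logs" identifiers and the partition values are illustrative assumptions, not part of this patch.

    // Sketch: list names of all partitions whose first key value is "2018-07-25",
    // leaving the second partition key unconstrained via an empty entry.
    static List<String> partitionNamesForDay(RawStore store)
        throws MetaException, NoSuchObjectException {
      return store.listPartitionNamesPs("hive", "default", "web_logs",
          Arrays.asList("2018-07-25", ""), (short) -1 /* no limit, as used elsewhere in this interface */);
    }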
+   /**
+    * Lists partitions that match a given partial specification and sets their auth privileges.
+    *   If userName and groupNames are null, then no auth privileges are set.
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys
+    *          Entries can be empty if you need to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @param userName
+    *          The user name to get authorization privileges for
+    * @param groupNames
+    *          The group names to get authorization privileges for
+    * @return A list of partitions that match the partial spec.
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException No such table exists
+    * @throws InvalidObjectException error accessing privilege information
+    */
+   List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   /** Persists the given column statistics object to the metastore
+    * @param colStats object to persist
++   * @param txnId transaction id of the calling transaction
++   * @param validWriteIds string format of valid writeId transaction list
++   * @param writeId write id associated with this statistics update
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
 -  boolean updateTableColumnStatistics(ColumnStatistics colStats)
++  boolean updateTableColumnStatistics(ColumnStatistics colStats, long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /** Persists the given column statistics object to the metastore
+    * @param statsObj object to persist
+    * @param partVals partition values to persist the stats for
++   * @param txnId transaction id of the calling transaction
++   * @param validWriteIds string format of valid writeId transaction list
++   * @param writeId write id associated with this statistics update
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
+   boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
 -     List<String> partVals)
++     List<String> partVals, long txnId, String validWriteIds, long writeId)
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
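
A hedged sketch of how a caller might drive the new transactional overloads above; the ColumnStatistics objects, the transaction id, the write-id list string and the write id are assumed to be produced elsewhere and are not prescribed by this patch.

    // Sketch: persist table-level and partition-level stats under the caller's transaction.
    static void persistStats(RawStore store, ColumnStatistics tblStats,
        ColumnStatistics partStats, List<String> partVals,
        long txnId, String validWriteIds, long writeId) throws Exception {
      store.updateTableColumnStatistics(tblStats, txnId, validWriteIds, writeId);
      store.updatePartitionColumnStatistics(partStats, partVals, txnId, validWriteIds, writeId);
    }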
+   /**
+    * Returns the relevant column statistics for a given column in a given table in a given database
+    * if such statistics exist.
+    * @param catName catalog name.
+    * @param dbName name of the database, defaults to current database
+    * @param tableName name of the table
+    * @param colName names of the columns for which statistics are requested
+    * @return Relevant column statistics for the column for the given table
+    * @throws NoSuchObjectException No such table
+    * @throws MetaException error accessing the RDBMS
+    *
+    */
+   ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+     List<String> colName) throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Returns the relevant column statistics for a given column in a given table in a given database
++   * if such statistics exist.
++   * @param catName catalog name.
++   * @param dbName name of the database, defaults to current database
++   * @param tableName name of the table
++   * @param colName names of the columns for which statistics are requested
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return Relevant column statistics for the column for the given table
++   * @throws NoSuchObjectException No such table
++   * @throws MetaException error accessing the RDBMS
++   *
++   */
++  ColumnStatistics getTableColumnStatistics(
++    String catName, String dbName, String tableName,
++    List<String> colName, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
++  /**
+    * Get statistics for a partition for a set of columns.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
+    * @param colNames list of columns to get stats for
+    * @return list of statistics objects
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException no such partition.
+    */
+   List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Get statistics for a partition for a set of columns.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
++   * @param colNames list of columns to get stats for
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return list of statistics objects
++   * @throws MetaException error accessing the RDBMS
++   * @throws NoSuchObjectException no such partition.
++   */
++  List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName,
++      List<String> partNames, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
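
For symmetry, a sketch of the read side using the same illustrative identifiers as earlier; the writeIdList string is whatever the caller's snapshot serializes to and is not specified here.

    // Sketch: read stats that are consistent with the caller's transactional snapshot.
    static void readStats(RawStore store, long txnId, String writeIdList) throws Exception {
      ColumnStatistics tableStats = store.getTableColumnStatistics(
          "hive", "default", "web_logs", Arrays.asList("ip", "bytes"), txnId, writeIdList);
      List<ColumnStatistics> partStats = store.getPartitionColumnStatistics(
          "hive", "default", "web_logs",
          Arrays.asList("dt=2018-07-25"), Arrays.asList("ip"), txnId, writeIdList);
      // The store uses txnId/writeIdList to decide whether the stored stats are valid
      // for this snapshot.
    }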
++  /**
+    * Deletes column statistics if present associated with a given db, table, partition and col. If
+    * null is passed instead of a colName, stats when present for all columns associated
+    * with a given db, table and partition are deleted.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partName partition name.
+    * @param partVals partition values.
+    * @param colName column name.
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad input, such as null table or database name.
+    */
+   boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Delete statistics for a single column or all columns in a table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param colName column name.  Null to delete stats for all columns in the table.
+    * @return true if the statistics were deleted.
+    * @throws NoSuchObjectException no such table or column.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad inputs, such as null table name.
+    */
+   boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                       String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   long cleanupEvents();
+ 
+   boolean addToken(String tokenIdentifier, String delegationToken);
+ 
+   boolean removeToken(String tokenIdentifier);
+ 
+   String getToken(String tokenIdentifier);
+ 
+   List<String> getAllTokenIdentifiers();
+ 
+   int addMasterKey(String key) throws MetaException;
+ 
+   void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException;
+ 
+   boolean removeMasterKey(Integer keySeq);
+ 
+   String[] getMasterKeys();
+ 
+   void verifySchema() throws MetaException;
+ 
+   String getMetaStoreSchemaVersion() throws  MetaException;
+ 
+   void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+ 
+   /**
+    * Drop a list of partitions.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name
+    * @param partNames list of partition names.
+    * @throws MetaException error accessing the RDBMS or storage.
+    * @throws NoSuchObjectException One or more of the partitions does not exist.
+    */
+   void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * List all DB grants for a given principal.
+    * @param principalName principal name
+    * @param principalType type
+    * @return all DB grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   List<HiveObjectPrivilege> listGlobalGrantsAll();
+ 
+   /**
+    * Find all the privileges for a given database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of all privileges.
+    */
+   List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @param columnName column name
+    * @return all privileges on this column in this partition
+    */
+   List<HiveObjectPrivilege> listPartitionColumnGrantsAll(
+       String catName, String dbName, String tableName, String partitionName, String columnName);
+ 
+   /**
+    * Find all of the privileges for a given table
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return all privileges on this table
+    */
+   List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName);
+ 
+   /**
+    * Find all of the privileges for a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @return all privileges on this partition
+    */
+   List<HiveObjectPrivilege> listPartitionGrantsAll(
+       String catName, String dbName, String tableName, String partitionName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return all privileges on this column in this table
+    */
+   List<HiveObjectPrivilege> listTableColumnGrantsAll(
+       String catName, String dbName, String tableName, String columnName);
+ 
+   /**
+    * Register a user-defined function based on the function specification passed in.
+    * @param func function to create
+    * @throws InvalidObjectException incorrectly specified function
+    * @throws MetaException error accessing the RDBMS
+    */
+   void createFunction(Function func)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Alter function based on new function specs.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @param newFunction new function specification
+    * @throws InvalidObjectException no such function, or incorrectly specified new function
+    * @throws MetaException incorrectly specified function
+    */
+   void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Drop a function definition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @throws MetaException incorrectly specified function
+    * @throws NoSuchObjectException no such function
+    * @throws InvalidObjectException not sure when this is thrown
+    * @throws InvalidInputException not sure when this is thrown
+    */
+   void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Retrieve function by name.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @return the function
+    * @throws MetaException incorrectly specified function
+    */
+   Function getFunction(String catName, String dbName, String funcName) throws MetaException;
+ 
+   /**
+    * Retrieve all functions.
+    * @return all functions in a catalog
+    * @throws MetaException incorrectly specified function
+    */
+   List<Function> getAllFunctions(String catName) throws MetaException;
+ 
+   /**
+    * Retrieve list of function names based on name pattern.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param pattern pattern to match
+    * @return functions that match the pattern
+    * @throws MetaException incorrectly specified function
+    */
+   List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException;
+ 
+   /**
+    * Get aggregated stats for a table or partition(s).
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are the names of the partitions, not
+    *                  values.
+    * @param colNames list of column names
+    * @return aggregated stats
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such table or partition
+    */
+   AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Get aggregated stats for a table or partition(s).
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are the names of the partitions, not
++   *                  values.
++   * @param colNames list of column names
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return aggregated stats
++   * @throws MetaException error accessing RDBMS
++   * @throws NoSuchObjectException no such table or partition
++   */
++  AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++    List<String> partNames, List<String> colNames,
++    long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
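
A short sketch of the aggregate call, mainly to illustrate that partNames are full partition names (key=value), not bare values; all identifiers below are illustrative assumptions.

    // Sketch: aggregate the "bytes" column over two partitions of a table partitioned by dt.
    static AggrStats aggregateBytes(RawStore store, long txnId, String writeIdList)
        throws MetaException, NoSuchObjectException {
      List<String> partNames = Arrays.asList("dt=2018-07-24", "dt=2018-07-25");
      return store.get_aggr_stats_for("hive", "default", "web_logs",
          partNames, Arrays.asList("bytes"), txnId, writeIdList);
    }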
++  /**
+    * Get column stats for all partitions of all tables in the database
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of column stats objects for all partitions of all tables in the database
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the next notification event.
+    * @param rqst Request containing information on the last processed notification.
+    * @return list of notifications, sorted by eventId
+    */
+   NotificationEventResponse getNextNotification(NotificationEventRequest rqst);
+ 
+ 
+   /**
+    * Add a notification entry.  This should only be called from inside the metastore
+    * @param event the notification to add
+    * @throws MetaException error accessing RDBMS
+    */
+   void addNotificationEvent(NotificationEvent event) throws MetaException;
+ 
+   /**
+    * Remove older notification events.
+    * @param olderThan Remove any events older than a given number of seconds
+    */
+   void cleanNotificationEvents(int olderThan);
+ 
+   /**
+    * Get the last issued notification event id.  This is intended for use by the export command
+    * so that users can determine the state of the system at the point of the export,
+    * and determine which notification events happened before or after the export.
+    * @return the id of the most recently issued notification event
+    */
+   CurrentNotificationEventId getCurrentNotificationEventId();
+ 
+   /**
+    * Get the number of events for a given database starting from a given event id.
+    * This is intended for use by the repl commands to track the progress of incremental dump.
+    * @param rqst request specifying the database and the starting event id
+    * @return the count of events for the given database from the requested event id
+    */
+   NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst);
+ 
+   /**
+    * Flush any catalog objects held by the metastore implementation.  Note that this does not
+    * flush statistics objects.  This should be called at the beginning of each query.
+    */
+   void flushCache();
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @return File metadata buffers from file metadata cache. The array is fileIds-sized, and
+    *         the entries (or nulls, if metadata is not in cache) correspond to fileIds in the list
+    */
+   ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException;
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @param metadata Metadata buffers corresponding to fileIds in the list.
+    * @param type The type; determines the class that can do additional processing for metadata.
+    */
+   void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
+       FileMetadataExprType type) throws MetaException;
+ 
+   /**
+    * @return Whether file metadata cache is supported by this implementation.
+    */
+   boolean isFileMetadataSupported();
+ 
+   /**
+    * Gets file metadata from cache after applying a format-specific expression that can
+    * produce additional information based on file metadata and also filter the file list.
+    * @param fileIds List of file IDs from the filesystem.
+    * @param expr Format-specific serialized expression applicable to the files' metadatas.
+    * @param type Expression type; used to determine the class that handles the metadata.
+    * @param metadatas Output parameter; fileIds-sized array to receive the metadatas
+    *                  for corresponding files, if any.
+    * @param exprResults Output parameter; fileIds-sized array to receive the format-specific
+    *                    expression results for the corresponding files.
+    * @param eliminated Output parameter; fileIds-sized array to receive the indication of whether
+    *                   the corresponding files are entirely eliminated by the expression.
+    */
+   void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated)
+           throws MetaException;
+ 
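
Because the last three parameters are output arrays, a sketch may help; the serialized expression bytes and the ORC_SARG expression type are placeholders chosen for illustration.

    // Sketch: the output arrays must be pre-sized to fileIds.size(); the store fills them
    // positionally, leaving nulls where no cached metadata exists.
    static void probeFileMetadata(RawStore store, List<Long> fileIds, byte[] serializedExpr)
        throws MetaException {
      ByteBuffer[] metadatas = new ByteBuffer[fileIds.size()];
      ByteBuffer[] exprResults = new ByteBuffer[fileIds.size()];
      boolean[] eliminated = new boolean[fileIds.size()];
      store.getFileMetadataByExpr(fileIds, FileMetadataExprType.ORC_SARG, serializedExpr,
          metadatas, exprResults, eliminated);
    }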
+   /** Gets file metadata handler for the corresponding type. */
+   FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type);
+ 
+   /**
+    * Gets total number of tables.
+    */
+   @InterfaceStability.Evolving
+   int getTableCount() throws MetaException;
+ 
+   /**
+    * Gets total number of partitions.
+    */
+   @InterfaceStability.Evolving
+   int getPartitionCount() throws MetaException;
+ 
+   /**
+    * Gets total number of databases.
+    */
+   @InterfaceStability.Evolving
+   int getDatabaseCount() throws MetaException;
+ 
+   /**
+    * Get the primary key associated with a table.  Strangely enough each SQLPrimaryKey is
+    * actually a column in the key, not the key itself.  Thus the list.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of primary key columns or an empty list if the table does not have a primary key
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+       throws MetaException;
+ 
+   /**
+    * Get the foreign keys for a table.  All foreign keys for a particular table can be fetched by
+    * passing null for the last two arguments.
+    * @param catName catalog name.
+    * @param parent_db_name Database the table referred to is in.  This can be null to match all
+    *                       databases.
+    * @param parent_tbl_name Table that is referred to.  This can be null to match all tables.
+    * @param foreign_db_name Database the table with the foreign key is in.
+    * @param foreign_tbl_name Table with the foreign key.
+    * @return List of all matching foreign key columns.  Note that if more than one foreign key
+    * matches the arguments the results here will be all mixed together into a single list.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException;
+ 
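
A sketch of the null-matching behaviour described above; "hive", "default" and "customers" are made-up identifiers.

    // Sketch: with null foreign_db_name/foreign_tbl_name, this returns every foreign key
    // column that references default.customers, whichever table declares the constraint.
    static List<SQLForeignKey> keysReferencingCustomers(RawStore store) throws MetaException {
      return store.getForeignKeys("hive", "default", "customers", null, null);
    }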
+   /**
+    * Get unique constraints associated with a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of unique constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get not null constraints on a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of not null constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get default values for columns in a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of default values defined on the table.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Get check constraints for columns in a table.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return check constraints for this table
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Create a table with constraints
+    * @param tbl table definition
+    * @param primaryKeys primary key definition, or null
+    * @param foreignKeys foreign key definition, or null
+    * @param uniqueConstraints unique constraints definition, or null
+    * @param notNullConstraints not null constraints definition, or null
+    * @param defaultConstraints default values definition, or null
+    * @param checkConstraints check constraints definition, or null
+    * @return list of constraint names
+    * @throws InvalidObjectException one of the provided objects is malformed.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
+     List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @throws NoSuchObjectException no constraint of this name exists
+    */
+   default void dropConstraint(String catName, String dbName, String tableName,
+                               String constraintName) throws NoSuchObjectException {
+     dropConstraint(catName, dbName, tableName, constraintName, false);
+   }
+ 
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @param missingOk if true, it is not an error if there is no constraint of this name.  If
+    *                  false and there is no constraint of this name an exception will be thrown.
+    * @throws NoSuchObjectException no constraint of this name exists and missingOk = false
+    */
+   void dropConstraint(String catName, String dbName, String tableName, String constraintName,
+                       boolean missingOk) throws NoSuchObjectException;
+ 
+   /**
+    * Add a primary key to a table.
+    * @param pks Columns in the primary key.
+    * @return the name of the constraint, as a list of strings.
+    * @throws InvalidObjectException The SQLPrimaryKeys list is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a foreign key to a table.
+    * @param fks foreign key specification
+    * @return foreign key name.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add unique constraints to a table.
+    * @param uks unique constraints specification
+    * @return unique constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add not null constraints to a table.
+    * @param nns not null constraint specifications
+    * @return constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add default values to a table definition
+    * @param dv list of default values
+    * @return constraint names
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addDefaultConstraints(List<SQLDefaultConstraint> dv)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add check constraints to a table
+    * @param cc check constraints to add
+    * @return list of constraint names
+    * @throws InvalidObjectException the specification is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addCheckConstraints(List<SQLCheckConstraint> cc) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Gets the unique id of the backing datastore for the metadata
+    * @return unique id of the backing datastore
+    * @throws MetaException error accessing the RDBMS
+    */
+   String getMetastoreDbUuid() throws MetaException;
+ 
+   void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   List<WMResourcePlan> getAllResourcePlans() throws MetaException;
+ 
+   WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   WMFullResourcePlan getActiveResourcePlan() throws MetaException;
+ 
+   WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException;
+ 
+   void dropResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException;
+ 
+   void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException;
+ 
+   void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   /**
+    * Create a new ISchema.
+    * @param schema schema to create
+    * @throws AlreadyExistsException there's already a schema with this name
+    * @throws MetaException general database exception
+    */
+   void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException;
+ 
+   /**
+    * Alter an existing ISchema.  This assumes the caller has already checked that such a schema
+    * exists.
+    * @param schemaName name of the schema
+    * @param newSchema new schema object
+    * @throws NoSuchObjectException no schema with this name exists
+    * @throws MetaException general database exception
+    */
+   void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get an ISchema by name.
+    * @param schemaName schema descriptor
+    * @return ISchema
+    * @throws MetaException general database exception
+    */
+   ISchema getISchema(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Drop an ISchema.  This does not check whether there are valid versions of the schema in
+    * existence, it assumes the caller has already done that.
+    * @param schemaName schema descriptor
+    * @throws NoSuchObjectException no schema of this name exists
+    * @throws MetaException general database exception
+    */
+   void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Create a new version of an existing schema.
+    * @param schemaVersion schema version to add
+    * @throws AlreadyExistsException a version of the schema with the same version number already
+    * exists.
+    * @throws InvalidObjectException the passed in SchemaVersion object has problems.
+    * @throws NoSuchObjectException no schema with the passed in name exists.
+    * @throws MetaException general database exception
+    */
+   void addSchemaVersion(SchemaVersion schemaVersion)
+       throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a schema version.  Note that the Thrift interface only supports changing the serde
+    * mapping and states.  This method does not guarantee it will check any more than that.  This
+    * method does not understand the state transitions and just assumes that the new state it is
+    * passed is reasonable.
+    * @param version version descriptor for the schema
+    * @param newVersion altered SchemaVersion
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get a specific schema version.
+    * @param version version descriptor for the schema
+    * @return the SchemaVersion
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException;
+ 
+   /**
+    * Get the latest version of a schema.
+    * @param schemaName name of the schema
+    * @return latest version of the schema
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Get all of the versions of a schema
+    * @param schemaName name of the schema
+    * @return all versions of the schema
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Find all SchemaVersion objects that match a query.  The query will select all SchemaVersions
+    * that are equal to all of the non-null passed in arguments.  That is, if arguments
+    * colName='name', colNamespace=null, type='string' are passed in, then all schemas that have
+    * a column with colName 'name' and type 'string' will be returned.
+    * @param colName column name.  Null is ok, which will cause this field to not be used in the
+    *                query.
+    * @param colNamespace column namespace.   Null is ok, which will cause this field to not be
+    *                     used in the query.
+    * @param type column type.   Null is ok, which will cause this field to not be used in the
+    *             query.
+    * @return List of all SchemaVersions that match.  Note that there is no expectation that these
+    * SchemaVersions derive from the same ISchema.  The list will be empty if there are no
+    * matching SchemaVersions.
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace, String type)
+       throws MetaException;
+ 
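
A sketch mirroring the example given in the javadoc above; null arguments simply drop out of the query.

    // Sketch: find every SchemaVersion (across schemas) that has a string column named "name".
    static List<SchemaVersion> versionsWithNameColumn(RawStore store) throws MetaException {
      return store.getSchemaVersionsByColumns("name", null, "string");
    }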
+   /**
+    * Drop a version of the schema.
+    * @param version version descriptor for the schema
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get serde information
+    * @param serDeName name of the SerDe
+    * @return the SerDe, or null if there is no such serde
+    * @throws NoSuchObjectException no serde with this name exists
+    * @throws MetaException general database exception
+    */
+   SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Add a serde
+    * @param serde serde to add
+    * @throws AlreadyExistsException a serde of this name already exists
+    * @throws MetaException general database exception
+    */
+   void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException;
+ 
+   /** Adds a RuntimeStat for persistence. */
+   void addRuntimeStat(RuntimeStat stat) throws MetaException;
+ 
+   /** Reads runtime statistic entries. */
+   List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException;
+ 
+   /** Removes outdated statistics. */
+   int deleteRuntimeStats(int maxRetainSecs) throws MetaException;
+ 
+   List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException;
+ 
+   List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException;
+ 
+   Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName,
+       String tableName) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Remove older write notification events.
+    * @param olderThan Remove any write notification events older than a given number of seconds
+    */
+   void cleanWriteNotificationEvents(int olderThan);
+ 
+   /**
+    * Get all write events for a specific transaction.
+    * @param txnId get all the events done by this transaction
+    * @param dbName the name of db for which dump is being taken
+    * @param tableName the name of the table for which the dump is being taken
+    * @return write event information for the given transaction
+    * @throws MetaException error accessing the RDBMS
+   List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException;
+ }


[22/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0000000,bdcbf41..9eb8424
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@@ -1,0 -1,12207 +1,12509 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Field;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.SQLIntegrityConstraintViolationException;
+ import java.sql.Statement;
+ import java.time.LocalDateTime;
+ import java.time.format.DateTimeFormatter;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.TreeSet;
+ import java.util.UUID;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOCanRetryException;
+ import javax.jdo.JDODataStoreException;
+ import javax.jdo.JDOException;
+ import javax.jdo.JDOHelper;
+ import javax.jdo.JDOObjectNotFoundException;
+ import javax.jdo.PersistenceManager;
+ import javax.jdo.PersistenceManagerFactory;
+ import javax.jdo.Query;
+ import javax.jdo.Transaction;
+ import javax.jdo.datastore.DataStoreCache;
+ import javax.jdo.datastore.JDOConnection;
+ import javax.jdo.identity.IntIdentity;
+ import javax.sql.DataSource;
+ 
+ import com.google.common.base.Strings;
+ 
+ import org.apache.commons.collections.CollectionUtils;
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.commons.lang.exception.ExceptionUtils;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.Path;
 -import org.apache.hadoop.hive.common.DatabaseName;
 -import org.apache.hadoop.hive.common.StatsSetupConst;
 -import org.apache.hadoop.hive.common.TableName;
++import org.apache.hadoop.hive.common.*;
+ import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.FunctionType;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Order;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 -import org.apache.hadoop.hive.metastore.api.ResourceType;
 -import org.apache.hadoop.hive.metastore.api.ResourceUri;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
 -import org.apache.hadoop.hive.metastore.api.SchemaType;
 -import org.apache.hadoop.hive.metastore.api.SchemaValidation;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.SerdeType;
 -import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 -import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 -import org.apache.hadoop.hive.metastore.model.MCatalog;
 -import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MConstraint;
 -import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
 -import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MDatabase;
 -import org.apache.hadoop.hive.metastore.model.MDelegationToken;
 -import org.apache.hadoop.hive.metastore.model.MFieldSchema;
 -import org.apache.hadoop.hive.metastore.model.MFunction;
 -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MISchema;
 -import org.apache.hadoop.hive.metastore.model.MMasterKey;
 -import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
 -import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 -import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
 -import org.apache.hadoop.hive.metastore.model.MOrder;
 -import org.apache.hadoop.hive.metastore.model.MPartition;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
 -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MResourceUri;
 -import org.apache.hadoop.hive.metastore.model.MRole;
 -import org.apache.hadoop.hive.metastore.model.MRoleMap;
 -import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
 -import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
 -import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
 -import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MStringList;
 -import org.apache.hadoop.hive.metastore.model.MTable;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
 -import org.apache.hadoop.hive.metastore.model.MType;
 -import org.apache.hadoop.hive.metastore.model.MVersionTable;
 -import org.apache.hadoop.hive.metastore.model.MWMMapping;
++import org.apache.hadoop.hive.metastore.model.*;
+ import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
 -import org.apache.hadoop.hive.metastore.model.MWMPool;
 -import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
 -import org.apache.hadoop.hive.metastore.model.MWMTrigger;
 -import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
++import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
++import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
++import org.apache.hive.common.util.TxnIdUtils;
+ import org.apache.thrift.TException;
+ import org.datanucleus.AbstractNucleusContext;
+ import org.datanucleus.ClassLoaderResolver;
+ import org.datanucleus.ClassLoaderResolverImpl;
+ import org.datanucleus.NucleusContext;
+ import org.datanucleus.PropertyNames;
+ import org.datanucleus.api.jdo.JDOPersistenceManager;
+ import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+ import org.datanucleus.store.rdbms.exceptions.MissingTableException;
+ import org.datanucleus.store.scostore.Store;
+ import org.datanucleus.util.WeakValueMap;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.codahale.metrics.Counter;
+ import com.codahale.metrics.MetricRegistry;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+ import com.google.common.collect.Sets;
+ 
+ 
+ /**
+  * This class is the interface between the application logic and the database
+  * store that contains the objects. Refrain from putting any logic in model.M* objects
+  * or in this file, as the former could be auto generated and this class would need
+  * to be made into an interface that can read both from a database and a
+  * filestore.
+  */
+ public class ObjectStore implements RawStore, Configurable {
+   private static Properties prop = null;
+   private static PersistenceManagerFactory pmf = null;
+   private static boolean forTwoMetastoreTesting = false;
+   private int batchSize = Batchable.NO_BATCHING;
+ 
+   private static final DateTimeFormatter YMDHMS_FORMAT = DateTimeFormatter.ofPattern(
+       "yyyy_MM_dd_HH_mm_ss");
+ 
+   private static Lock pmfPropLock = new ReentrantLock();
+   /**
+   * Verify the schema only once per JVM since the db connection info is static
+   */
+   private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
+   private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class);
+ 
+   private enum TXN_STATUS {
+     NO_STATE, OPEN, COMMITED, ROLLBACK
+   }
+ 
+   private static final Map<String, Class<?>> PINCLASSMAP;
+   private static final String HOSTNAME;
+   private static final String USER;
+   private static final String JDO_PARAM = ":param";
+   static {
+     Map<String, Class<?>> map = new HashMap<>();
+     map.put("table", MTable.class);
+     map.put("storagedescriptor", MStorageDescriptor.class);
+     map.put("serdeinfo", MSerDeInfo.class);
+     map.put("partition", MPartition.class);
+     map.put("database", MDatabase.class);
+     map.put("type", MType.class);
+     map.put("fieldschema", MFieldSchema.class);
+     map.put("order", MOrder.class);
+     PINCLASSMAP = Collections.unmodifiableMap(map);
+     String hostname = "UNKNOWN";
+     try {
+       InetAddress clientAddr = InetAddress.getLocalHost();
+       hostname = clientAddr.getHostAddress();
+     } catch (IOException e) {
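+       // Ignored; HOSTNAME falls back to the "UNKNOWN" default set above.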
+     }
+     HOSTNAME = hostname;
+     String user = System.getenv("USER");
+     USER = org.apache.commons.lang.StringUtils.defaultString(user, "UNKNOWN");
+   }
+ 
+ 
+   private boolean isInitialized = false;
+   private PersistenceManager pm = null;
+   private SQLGenerator sqlGenerator = null;
+   private MetaStoreDirectSql directSql = null;
+   private DatabaseProduct dbType = null;
+   private PartitionExpressionProxy expressionProxy = null;
+   private Configuration conf;
+   private volatile int openTrasactionCalls = 0;
+   private Transaction currentTransaction = null;
+   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
+   private Pattern partitionValidationPattern;
+   private Counter directSqlErrors;
++  private boolean areTxnStatsSupported = false;
+ 
+   /**
+    * An AutoCloseable wrapper around the Query class, used to pass the Query object to the caller
+    * and let the caller release the resources when the QueryWrapper goes out of scope.
+    */
+   public static class QueryWrapper implements AutoCloseable {
+     public Query query;
+ 
+     /**
+      * Explicitly closes the query object to release the resources
+      */
+     @Override
+     public void close() {
+       if (query != null) {
+         query.closeAll();
+         query = null;
+       }
+     }
+   }
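+   // Illustrative usage of QueryWrapper (sketch only; mirrors e.g. dropDatabase below):
+   //   QueryWrapper queryWrapper = new QueryWrapper();
+   //   try {
+   //     openTransaction();
+   //     ... run a query and assign it to queryWrapper.query ...
+   //     success = commitTransaction();
+   //   } finally {
+   //     rollbackAndCleanup(success, queryWrapper);
+   //   }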
+ 
+   public ObjectStore() {
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   /**
+    * Called whenever this object is instantiated using ReflectionUtils, and also
+    * on connection retries. In cases of connection retries, conf will usually
+    * contain modified values.
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     // Although an instance of ObjectStore is accessed by one thread, there may
+     // be many threads with ObjectStore instances. So the static variables
+     // pmf and prop need to be protected with locks.
+     pmfPropLock.lock();
+     try {
+       isInitialized = false;
+       this.conf = conf;
++      this.areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED);
+       configureSSL(conf);
+       Properties propsFromConf = getDataSourceProps(conf);
+       boolean propsChanged = !propsFromConf.equals(prop);
+ 
+       if (propsChanged) {
+         if (pmf != null){
+           clearOutPmfClassLoaderCache(pmf);
+           if (!forTwoMetastoreTesting) {
+             // close the underlying connection pool to avoid leaks
+             pmf.close();
+           }
+         }
+         pmf = null;
+         prop = null;
+       }
+ 
+       assert(!isActiveTransaction());
+       shutdown();
+       // Always want to re-create pm as we don't know if it were created by the
+       // most recent instance of the pmf
+       pm = null;
+       directSql = null;
+       expressionProxy = null;
+       openTrasactionCalls = 0;
+       currentTransaction = null;
+       transactionStatus = TXN_STATUS.NO_STATE;
+ 
+       initialize(propsFromConf);
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // Note, if metrics have not been initialized this will return null, which means we aren't
+       // using metrics.  Thus we should always check whether this is non-null before using.
+       MetricRegistry registry = Metrics.getRegistry();
+       if (registry != null) {
+         directSqlErrors = Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS);
+       }
+ 
+       this.batchSize = MetastoreConf.getIntVar(conf, ConfVars.RAWSTORE_PARTITION_BATCH_SIZE);
+ 
+       if (!isInitialized) {
+         throw new RuntimeException(
+         "Unable to create persistence manager. Check dss.log for details");
+       } else {
+         LOG.debug("Initialized ObjectStore");
+       }
+     } finally {
+       pmfPropLock.unlock();
+     }
+   }
+ 
+   private ClassLoader classLoader;
+   {
+     classLoader = Thread.currentThread().getContextClassLoader();
+     if (classLoader == null) {
+       classLoader = ObjectStore.class.getClassLoader();
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private void initialize(Properties dsProps) {
+     int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     long retryInterval = MetastoreConf.getTimeVar(conf,
+         ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
+     int numTries = retryLimit;
+ 
+     while (numTries > 0){
+       try {
+         initializeHelper(dsProps);
+         return; // If we reach here, we succeed.
+       } catch (Exception e){
+         numTries--;
+         boolean retriable = isRetriableException(e);
+         if ((numTries > 0) && retriable){
+           LOG.info("Retriable exception while instantiating ObjectStore, retrying. " +
+               "{} tries left", numTries, e);
+           try {
+             Thread.sleep(retryInterval);
+           } catch (InterruptedException ie) {
+             // Restore the interrupted status, since we do not want to catch it.
+             LOG.debug("Interrupted while sleeping before retrying.", ie);
+             Thread.currentThread().interrupt();
+           }
+           // If we're here, we'll proceed down the next while loop iteration.
+         } else {
+           // we've reached our limit, throw the last one.
+           if (retriable){
+             LOG.warn("Exception retry limit reached, not retrying any longer.",
+               e);
+           } else {
+             LOG.debug("Non-retriable exception during ObjectStore initialize.", e);
+           }
+           throw e;
+         }
+       }
+     }
+   }
+ 
+   private static final Set<Class<? extends Throwable>> retriableExceptionClasses =
+       new HashSet<>(Arrays.asList(JDOCanRetryException.class));
+   /**
+    * Helper function for initialize to determine if we should retry an exception.
+    * We return true if the exception is of a known type of retriable exceptions, or if one
+    * of its recursive .getCause returns a known type of retriable exception.
+    */
+   private boolean isRetriableException(Throwable e) {
+     if (e == null){
+       return false;
+     }
+     if (retriableExceptionClasses.contains(e.getClass())){
+       return true;
+     }
+     for (Class<? extends Throwable> c : retriableExceptionClasses){
+       if (c.isInstance(e)){
+         return true;
+       }
+     }
+ 
+     if (e.getCause() == null){
+       return false;
+     }
+     return isRetriableException(e.getCause());
+   }
+ 
+   /**
+    * Private helper that performs the initialization routine, so it can be retried if it fails.
+    * @param dsProps the datasource properties to initialize with
+    */
+   private void initializeHelper(Properties dsProps) {
+     LOG.debug("ObjectStore, initialize called");
+     prop = dsProps;
+     pm = getPersistenceManager();
+     try {
+       String productName = MetaStoreDirectSql.getProductName(pm);
+       sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf);
+     } catch (SQLException e) {
+       LOG.error("error trying to figure out the database product", e);
+       throw new RuntimeException(e);
+     }
+     isInitialized = pm != null;
+     if (isInitialized) {
+       dbType = determineDatabaseProduct();
+       expressionProxy = createExpressionProxy(conf);
+       if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) {
+         String schema = prop.getProperty("javax.jdo.mapping.Schema");
+         schema = org.apache.commons.lang.StringUtils.defaultIfBlank(schema, null);
+         directSql = new MetaStoreDirectSql(pm, conf, schema);
+       }
+     }
+     LOG.debug("RawStore: {}, with PersistenceManager: {}" +
+         " created in the thread with id: {}", this, pm, Thread.currentThread().getId());
+   }
+ 
+   private DatabaseProduct determineDatabaseProduct() {
+     try {
+       return DatabaseProduct.determineDatabaseProduct(getProductName(pm));
+     } catch (SQLException e) {
+       LOG.warn("Cannot determine database product; assuming OTHER", e);
+       return DatabaseProduct.OTHER;
+     }
+   }
+ 
+   private static String getProductName(PersistenceManager pm) {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     try {
+       return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
+     } catch (Throwable t) {
+       LOG.warn("Error retrieving product name", t);
+       return null;
+     } finally {
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   /**
+    * Creates the proxy used to evaluate expressions. This is here to prevent circular
+    * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; ql. If server and
+    * client are split, this can be removed.
+    * @param conf Configuration.
+    * @return The partition expression proxy.
+    */
+   private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
+     try {
+       Class<? extends PartitionExpressionProxy> clazz =
+            JavaUtils.getClass(className, PartitionExpressionProxy.class);
+       return JavaUtils.newInstance(clazz, new Class<?>[0], new Object[0]);
+     } catch (MetaException e) {
+       LOG.error("Error loading PartitionExpressionProxy", e);
+       throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
+     }
+   }
+ 
+   /**
+    * Configure the SSL properties of the connection from provided config
+    * @param conf
+    */
+   private static void configureSSL(Configuration conf) {
+     // SSL support
+     String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS);
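+     // The value is parsed below as comma-separated key=value pairs that are set as system
+     // properties, e.g. (illustrative):
+     //   javax.net.ssl.trustStore=/path/to/truststore,javax.net.ssl.trustStorePassword=secret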
+     if (org.apache.commons.lang.StringUtils.isNotEmpty(sslPropString)) {
+       LOG.info("Metastore setting SSL properties of the connection to backed DB");
+       for (String sslProp : sslPropString.split(",")) {
+         String[] pair = sslProp.trim().split("=");
+         if (pair != null && pair.length == 2) {
+           System.setProperty(pair[0].trim(), pair[1].trim());
+         } else {
+           LOG.warn("Invalid metastore property value for {}", ConfVars.DBACCESS_SSL_PROPS);
+         }
+       }
+     }
+   }
+ 
+   /**
+    * Properties specified in hive-default.xml override the properties specified
+    * in jpox.properties.
+    */
+   @SuppressWarnings("nls")
+   private static Properties getDataSourceProps(Configuration conf) {
+     Properties prop = new Properties();
+     correctAutoStartMechanism(conf);
+ 
+     // First, go through and set all our values for datanucleus and javax.jdo parameters.  This
+     // has to be a separate first step because we don't set the default values in the config object.
+     for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) {
+       String confVal = MetastoreConf.getAsString(conf, var);
+       String varName = var.getVarname();
+       Object prevVal = prop.setProperty(varName, confVal);
+       if (MetastoreConf.isPrintable(varName)) {
+         LOG.debug("Overriding {} value {} from jpox.properties with {}",
+           varName, prevVal, confVal);
+       }
+     }
+ 
+     // Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
+     // TODO Commenting this out for now, as it breaks because the conf values aren't getting properly
+     // interpolated in case of variables.  See HIVE-17788.
+     /*
+     for (Map.Entry<String, String> e : conf) {
+       if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
+         // We have to handle this differently depending on whether it is a value known to
+         // MetastoreConf or not.  If it is, we need to get the default value if a value isn't
+         // provided.  If not, we just set whatever the user has set.
+         Object prevVal = prop.setProperty(e.getKey(), e.getValue());
+         if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) {
+           LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+               + " from  jpox.properties with " + e.getValue());
+         }
+       }
+     }
+     */
+ 
+     // Password may no longer be in the conf, use getPassword()
+     try {
+       String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+       if (org.apache.commons.lang.StringUtils.isNotEmpty(passwd)) {
+         // We can get away with the use of varname here because varname == hiveName for PWD
+         prop.setProperty(ConfVars.PWD.getVarname(), passwd);
+       }
+     } catch (IOException err) {
+       throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
+     }
+ 
+     if (LOG.isDebugEnabled()) {
+       for (Entry<Object, Object> e : prop.entrySet()) {
+         if (MetastoreConf.isPrintable(e.getKey().toString())) {
+           LOG.debug("{} = {}", e.getKey(), e.getValue());
+         }
+       }
+     }
+ 
+     return prop;
+   }
+ 
+   /**
+    * Update conf to set datanucleus.autoStartMechanismMode=ignored.
+    * This is necessary to be able to use an older version of Hive against
+    * an upgraded but compatible metastore schema in the db created by a newer
+    * version of Hive.
+    * @param conf
+    */
+   private static void correctAutoStartMechanism(Configuration conf) {
+     final String autoStartKey = "datanucleus.autoStartMechanismMode";
+     final String autoStartIgnore = "ignored";
+     String currentAutoStartVal = conf.get(autoStartKey);
+     if (!autoStartIgnore.equalsIgnoreCase(currentAutoStartVal)) {
+       LOG.warn("{} is set to unsupported value {} . Setting it to value: {}", autoStartKey,
+         conf.get(autoStartKey), autoStartIgnore);
+     }
+     conf.set(autoStartKey, autoStartIgnore);
+   }
+ 
+   private static synchronized PersistenceManagerFactory getPMF() {
+     if (pmf == null) {
+ 
+       Configuration conf = MetastoreConf.newMetastoreConf();
+       DataSourceProvider dsp = DataSourceProviderFactory.hasProviderSpecificConfigurations(conf) ?
+               DataSourceProviderFactory.getDataSourceProvider(conf) : null;
+ 
+       if (dsp == null) {
+         pmf = JDOHelper.getPersistenceManagerFactory(prop);
+       } else {
+         try {
+           DataSource ds = dsp.create(conf);
+           Map<Object, Object> dsProperties = new HashMap<>();
+           //Any preexisting datanucleus property should be passed along
+           dsProperties.putAll(prop);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY, ds);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY2, ds);
+           dsProperties.put("javax.jdo.PersistenceManagerFactoryClass",
+               "org.datanucleus.api.jdo.JDOPersistenceManagerFactory");
+           pmf = JDOHelper.getPersistenceManagerFactory(dsProperties);
+         } catch (SQLException e) {
+           LOG.warn("Could not create PersistenceManagerFactory using " +
+               "connection pool properties, will fall back", e);
+           pmf = JDOHelper.getPersistenceManagerFactory(prop);
+         }
+       }
+       DataStoreCache dsc = pmf.getDataStoreCache();
+       if (dsc != null) {
+         String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES);
+         LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"{}\"", objTypes);
+         if (org.apache.commons.lang.StringUtils.isNotEmpty(objTypes)) {
+           String[] typeTokens = objTypes.toLowerCase().split(",");
+           for (String type : typeTokens) {
+             type = type.trim();
+             if (PINCLASSMAP.containsKey(type)) {
+               dsc.pinAll(true, PINCLASSMAP.get(type));
+             } else {
+               LOG.warn("{} is not one of the pinnable object types: {}", type,
+                 org.apache.commons.lang.StringUtils.join(PINCLASSMAP.keySet(), " "));
+             }
+           }
+         }
+       } else {
+         LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes");
+       }
+     }
+     return pmf;
+   }
+ 
+   @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+   @InterfaceStability.Evolving
+   public PersistenceManager getPersistenceManager() {
+     return getPMF().getPersistenceManager();
+   }
+ 
+   @Override
+   public void shutdown() {
+     LOG.debug("RawStore: {}, with PersistenceManager: {} will be shutdown", this, pm);
+     if (pm != null) {
+       pm.close();
+       pm = null;
+     }
+   }
+ 
+   /**
+    * Opens a new transaction, or joins the one already created. Every call of this function must
+    * have a corresponding commit or rollback function call.
+    *
+    * @return an active transaction
+    */
+ 
+   @Override
+   public boolean openTransaction() {
+     openTrasactionCalls++;
+     if (openTrasactionCalls == 1) {
+       currentTransaction = pm.currentTransaction();
+       currentTransaction.begin();
+       transactionStatus = TXN_STATUS.OPEN;
+     } else {
+       // openTransactionCalls > 1 means this is an interior transaction
+       // We should already have a transaction created that is active.
+       if ((currentTransaction == null) || (!currentTransaction.isActive())){
+         throw new RuntimeException("openTransaction called in an interior"
+             + " transaction scope, but currentTransaction is not active.");
+       }
+     }
+ 
+     boolean result = currentTransaction.isActive();
+     debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
+     return result;
+   }
+ 
+   /**
+    * If this is the commit of the first open call, then an actual commit is
+    * performed.
+    *
+    * @return true, or false if the transaction has already been marked for rollback
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public boolean commitTransaction() {
+     if (TXN_STATUS.ROLLBACK == transactionStatus) {
+       debugLog("Commit transaction: rollback");
+       return false;
+     }
+     if (openTrasactionCalls <= 0) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+           + openTrasactionCalls + ". This probably indicates that there are unbalanced " +
+           "calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     if (!currentTransaction.isActive()) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+           + openTrasactionCalls + ". This probably indicates that there are unbalanced " +
+           "calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     openTrasactionCalls--;
+     debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
+ 
+     if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
+       transactionStatus = TXN_STATUS.COMMITED;
+       currentTransaction.commit();
+     }
+     return true;
+   }
+ 
+   /**
+    * @return true if there is an active transaction. If the current transaction
+    *         is either committed or rolled back it returns false
+    */
+   @Override
+   public boolean isActiveTransaction() {
+     if (currentTransaction == null) {
+       return false;
+     }
+     return currentTransaction.isActive();
+   }
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @Override
+   public void rollbackTransaction() {
+     if (openTrasactionCalls < 1) {
+       debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls);
+       return;
+     }
+     debugLog("Rollback transaction, isActive: " + currentTransaction.isActive());
+     try {
+       if (currentTransaction.isActive()
+           && transactionStatus != TXN_STATUS.ROLLBACK) {
+         currentTransaction.rollback();
+       }
+     } finally {
+       openTrasactionCalls = 0;
+       transactionStatus = TXN_STATUS.ROLLBACK;
+       // remove all detached objects from the cache, since the transaction is
+       // being rolled back they are no longer relevant, and this prevents them
+       // from reattaching in future transactions
+       pm.evictAll();
+     }
+   }
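+ 
+   // Typical nesting of the three transaction methods above, as used throughout this class
+   // (sketch; see e.g. createCatalog below):
+   //   boolean committed = false;
+   //   try {
+   //     openTransaction();
+   //     ... JDO work ...
+   //     committed = commitTransaction();
+   //   } finally {
+   //     if (!committed) {
+   //       rollbackTransaction();
+   //     }
+   //   }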
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     LOG.debug("Creating catalog " + cat.getName());
+     boolean committed = false;
+     MCatalog mCat = catToMCat(cat);
+     try {
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat)
+       throws MetaException, InvalidOperationException {
+     if (!cat.getName().equals(catName)) {
+       throw new InvalidOperationException("You cannot change a catalog's name");
+     }
+     boolean committed = false;
+     try {
+       MCatalog mCat = getMCatalog(catName);
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getLocationUri())) {
+         mCat.setLocationUri(cat.getLocationUri());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getDescription())) {
+         mCat.setDescription(cat.getDescription());
+       }
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Fetching catalog " + catalogName);
+     MCatalog mCat = getMCatalog(catalogName);
+     if (mCat == null) {
+       throw new NoSuchObjectException("No catalog " + catalogName);
+     }
+     return mCatToCat(mCat);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     LOG.debug("Fetching all catalog names");
+     boolean commited = false;
+     List<String> catalogs = null;
+ 
+     String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog";
+     Query query = null;
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery(queryStr);
+       query.setResult("name");
+       catalogs = new ArrayList<>((Collection<String>) query.execute());
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(catalogs);
+     return catalogs;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Dropping catalog " + catalogName);
+     boolean committed = false;
+     try {
+       openTransaction();
+       MCatalog mCat = getMCatalog(catalogName);
+       pm.retrieve(mCat);
+       if (mCat == null) {
+         throw new NoSuchObjectException("No catalog " + catalogName);
+       }
+       pm.deletePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   private MCatalog getMCatalog(String catalogName) throws MetaException {
+     boolean committed = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catalogName = normalizeIdentifier(catalogName);
+       query = pm.newQuery(MCatalog.class, "name == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setUnique(true);
+       MCatalog mCat = (MCatalog)query.execute(catalogName);
+       pm.retrieve(mCat);
+       committed = commitTransaction();
+       return mCat;
+     } finally {
+       rollbackAndCleanup(committed, query);
+     }
+   }
+ 
+   private MCatalog catToMCat(Catalog cat) {
+     MCatalog mCat = new MCatalog();
+     mCat.setName(normalizeIdentifier(cat.getName()));
+     if (cat.isSetDescription()) {
+       mCat.setDescription(cat.getDescription());
+     }
+     mCat.setLocationUri(cat.getLocationUri());
+     return mCat;
+   }
+ 
+   private Catalog mCatToCat(MCatalog mCat) {
+     Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
+     if (mCat.getDescription() != null) {
+       cat.setDescription(mCat.getDescription());
+     }
+     return cat;
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     boolean commited = false;
+     MDatabase mdb = new MDatabase();
+     assert db.getCatalogName() != null;
+     mdb.setCatalogName(normalizeIdentifier(db.getCatalogName()));
+     assert mdb.getCatalogName() != null;
+     mdb.setName(db.getName().toLowerCase());
+     mdb.setLocationUri(db.getLocationUri());
+     mdb.setDescription(db.getDescription());
+     mdb.setParameters(db.getParameters());
+     mdb.setOwnerName(db.getOwnerName());
+     PrincipalType ownerType = db.getOwnerType();
+     mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name()));
+     try {
+       openTransaction();
+       pm.makePersistent(mdb);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       name = normalizeIdentifier(name);
+       catName = normalizeIdentifier(catName);
+       query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
+       query.declareParameters("java.lang.String dbname, java.lang.String catname");
+       query.setUnique(true);
+       mdb = (MDatabase) query.execute(name, catName);
+       pm.retrieve(mdb);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     if (mdb == null) {
+       throw new NoSuchObjectException("There is no database " + catName + "." + name);
+     }
+     return mdb;
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
+     MetaException ex = null;
+     Database db = null;
+     try {
+       db = getDatabaseInternal(catalogName, name);
+     } catch (MetaException e) {
+       // Signature restriction to NSOE, and NSOE being a flat exception prevents us from
+       // setting the cause of the NSOE as the MetaException. We should not lose the info
+       // we got here, but it's very likely that the MetaException is irrelevant and is
+       // actually an NSOE message, so we should log it and throw an NSOE with the msg.
+       ex = e;
+     }
+     if (db == null) {
+       LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException",
+           catalogName, name, ex);
+       throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
+     }
+     return db;
+   }
+ 
+   public Database getDatabaseInternal(String catalogName, String name)
+       throws MetaException, NoSuchObjectException {
+     return new GetDbHelper(catalogName, name, true, true) {
+       @Override
+       protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
+         return directSql.getDatabase(catalogName, dbName);
+       }
+ 
+       @Override
+       protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
+         return getJDODatabase(catalogName, dbName);
+       }
+     }.run(false);
+    }
+ 
+   public Database getJDODatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     try {
+       openTransaction();
+       mdb = getMDatabase(catName, name);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     Database db = new Database();
+     db.setName(mdb.getName());
+     db.setDescription(mdb.getDescription());
+     db.setLocationUri(mdb.getLocationUri());
+     db.setParameters(convertMap(mdb.getParameters()));
+     db.setOwnerName(mdb.getOwnerName());
+     String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
+     PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type);
+     db.setOwnerType(principalType);
+     db.setCatalogName(catName);
+     return db;
+   }
+ 
+   /**
+    * Alter the database object in the metastore. Currently only the parameters, owner,
+    * description and location of the database can be changed.
+    * @param catName the catalog name
+    * @param dbName the database name
+    * @param db the Hive Database object
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    */
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+     throws MetaException, NoSuchObjectException {
+ 
+     MDatabase mdb = null;
+     boolean committed = false;
+     try {
+       mdb = getMDatabase(catName, dbName);
+       mdb.setParameters(db.getParameters());
+       mdb.setOwnerName(db.getOwnerName());
+       if (db.getOwnerType() != null) {
+         mdb.setOwnerType(db.getOwnerType().name());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getDescription())) {
+         mdb.setDescription(db.getDescription());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getLocationUri())) {
+         mdb.setLocationUri(db.getLocationUri());
+       }
+       openTransaction();
+       pm.makePersistent(mdb);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname)
+       throws NoSuchObjectException, MetaException {
+     boolean success = false;
+     LOG.info("Dropping database {}.{} along with all tables", catName, dbname);
+     dbname = normalizeIdentifier(dbname);
+     catName = normalizeIdentifier(catName);
+     QueryWrapper queryWrapper = new QueryWrapper();
+     try {
+       openTransaction();
+ 
+       // then drop the database
+       MDatabase db = getMDatabase(catName, dbname);
+       pm.retrieve(db);
+       if (db != null) {
+         List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbname, null, queryWrapper);
+         if (CollectionUtils.isNotEmpty(dbGrants)) {
+           pm.deletePersistentAll(dbGrants);
+         }
+         pm.deletePersistent(db);
+       }
+       success = commitTransaction();
+     } finally {
+       rollbackAndCleanup(success, queryWrapper);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (pattern == null || pattern.equals("*")) {
+       return getAllDatabases(catName);
+     }
+     boolean commited = false;
+     List<String> databases = null;
+     Query query = null;
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
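+       // e.g. a pattern of "db1*|db2*" is split into the sub-patterns "db1*" and "db2*"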
+       String[] subpatterns = pattern.trim().split("\\|");
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>(subpatterns.length);
+       appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals);
+       appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals);
+       query = pm.newQuery(MDatabase.class, filterBuilder.toString());
+       query.setResult("name");
+       query.setOrdering("name ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       databases = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return databases;
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     boolean commited = false;
+     List<String> databases = null;
+ 
+     Query query = null;
+     catName = normalizeIdentifier(catName);
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " +
+           "where catalogName == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setResult("name");
+       databases = new ArrayList<>((Collection<String>) query.execute(catName));
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(databases);
+     return databases;
+   }
+ 
+   private MType getMType(Type type) {
+     List<MFieldSchema> fields = new ArrayList<>();
+     if (type.getFields() != null) {
+       for (FieldSchema field : type.getFields()) {
+         fields.add(new MFieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     return new MType(type.getName(), type.getType1(), type.getType2(), fields);
+   }
+ 
+   private Type getType(MType mtype) {
+     List<FieldSchema> fields = new ArrayList<>();
+     if (mtype.getFields() != null) {
+       for (MFieldSchema field : mtype.getFields()) {
+         fields.add(new FieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     Type ret = new Type();
+     ret.setName(mtype.getName());
+     ret.setType1(mtype.getType1());
+     ret.setType2(mtype.getType2());
+     ret.setFields(fields);
+     return ret;
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     boolean success = false;
+     MType mtype = getMType(type);
+     boolean commited = false;
+     try {
+       openTransaction();
+       pm.makePersistent(mtype);
+       commited = commitTransaction();
+       success = true;
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     Type type = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType mtype = (MType) query.execute(typeName.trim());
+       pm.retrieve(mtype);
+       if (mtype != null) {
+         type = getType(mtype);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return type;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     boolean success = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType type = (MType) query.execute(typeName.trim());
+       pm.retrieve(type);
+       if (type != null) {
+         pm.deletePersistent(type);
+       }
+       success = commitTransaction();
+     } catch (JDOObjectNotFoundException e) {
+       success = commitTransaction();
+       LOG.debug("type not found {}", typeName, e);
+     } finally {
+       rollbackAndCleanup(success, query);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     boolean success = false;
+     try {
+       openTransaction();
+       createTable(tbl);
+       // Add constraints.
+       // We need not do a deep retrieval of the Table Column Descriptor while persisting the
+       // constraints since this transaction involving create table is not yet committed.
+       List<String> constraintNames = new ArrayList<>();
+       if (foreignKeys != null) {
+         constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints));
+       }
+       if (primaryKeys != null) {
+         constraintNames.addAll(addPrimaryKeys(primaryKeys, false));
+       }
+       if (uniqueConstraints != null) {
+         constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false));
+       }
+       if (notNullConstraints != null) {
+         constraintNames.addAll(addNotNullConstraints(notNullConstraints, false));
+       }
+       if (defaultConstraints != null) {
+         constraintNames.addAll(addDefaultConstraints(defaultConstraints, false));
+       }
+       if (checkConstraints != null) {
+         constraintNames.addAll(addCheckConstraints(checkConstraints, false));
+       }
+       success = commitTransaction();
+       return constraintNames;
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     boolean commited = false;
++    MTable mtbl = null;
++
+     try {
+       openTransaction();
+ 
 -      MTable mtbl = convertToMTable(tbl);
++      mtbl = convertToMTable(tbl);
++      if (TxnUtils.isTransactionalTable(tbl)) {
++        mtbl.setWriteId(tbl.getWriteId());
++      }
+       pm.makePersistent(mtbl);
+ 
+       if (tbl.getCreationMetadata() != null) {
+         MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata());
+         pm.makePersistent(mcm);
+       }
+ 
+       PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
+       List<Object> toPersistPrivObjs = new ArrayList<>();
+       if (principalPrivs != null) {
+         int now = (int)(System.currentTimeMillis()/1000);
+ 
+         Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE, "SQL");
+       }
+       pm.makePersistentAll(toPersistPrivObjs);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   /**
+    * Convert each PrivilegeGrantInfo in privMap to an MTablePrivilege, and add all of
+    * them to toPersistPrivObjs. These privilege objects will be persisted as
+    * part of createTable.
+    *
+    * @param mtbl
+    * @param toPersistPrivObjs
+    * @param now
+    * @param privMap
+    * @param type
+    * @param authorizer
+    */
+   private void putPersistentPrivObjects(MTable mtbl, List<Object> toPersistPrivObjs,
+       int now, Map<String, List<PrivilegeGrantInfo>> privMap, PrincipalType type, String authorizer) {
+     if (privMap != null) {
+       for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : privMap
+           .entrySet()) {
+         String principalName = entry.getKey();
+         List<PrivilegeGrantInfo> privs = entry.getValue();
+         for (int i = 0; i < privs.size(); i++) {
+           PrivilegeGrantInfo priv = privs.get(i);
+           if (priv == null) {
+             continue;
+           }
+           MTablePrivilege mTblSec = new MTablePrivilege(
+               principalName, type.toString(), mtbl, priv.getPrivilege(),
+               now, priv.getGrantor(), priv.getGrantorType().toString(), priv
+                   .isGrantOption(), authorizer);
+           toPersistPrivObjs.add(mTblSec);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean materializedView = false;
+     boolean success = false;
+     try {
+       openTransaction();
+       MTable tbl = getMTable(catName, dbName, tableName);
+       pm.retrieve(tbl);
+       if (tbl != null) {
+         materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType());
+         // first remove all the grants
+         List<MTablePrivilege> tabGrants = listAllTableGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(tabGrants)) {
+           pm.deletePersistentAll(tabGrants);
+         }
+         List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(tblColGrants)) {
+           pm.deletePersistentAll(tblColGrants);
+         }
+ 
+         List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(partGrants)) {
+           pm.deletePersistentAll(partGrants);
+         }
+ 
+         List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(partColGrants)) {
+           pm.deletePersistentAll(partColGrants);
+         }
+         // delete column statistics if present
+         try {
+           deleteTableColumnStatistics(catName, dbName, tableName, null);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Found no table level column statistics associated with {} to delete",
+               TableName.getQualified(catName, dbName, tableName));
+         }
+ 
++        // TODO## remove? unused
++        Table table = convertToTable(tbl);
++
+         List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
+                                            catName, dbName, tableName, null);
+         if (CollectionUtils.isNotEmpty(tabConstraints)) {
+           pm.deletePersistentAll(tabConstraints);
+         }
+ 
+         preDropStorageDescriptor(tbl.getSd());
+ 
+         if (materializedView) {
+           dropCreationMetadata(tbl.getDatabase().getCatalogName(),
+               tbl.getDatabase().getName(), tbl.getTableName());
+         }
+ 
+         // then remove the table
+         pm.deletePersistentAll(tbl);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean success = false;
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     try {
+       openTransaction();
+       MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
+       pm.retrieve(mcm);
+       if (mcm != null) {
+         pm.deletePersistentAll(mcm);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName(
+       String catName, String dbName, String tableName, String constraintname) {
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     constraintname = constraintname!=null?normalizeIdentifier(constraintname):null;
+     List<MConstraint> mConstraints = null;
+     List<String> constraintNames = new ArrayList<>();
+     Query query = null;
+ 
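+     // First collect the names of the matching constraints (where the table is the parent or
+     // the child, optionally restricted to a single constraint name), then fetch the
+     // MConstraint objects by those names.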
+     try {
+       query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint  where "
+         + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " +
+               "parentTable.database.catalogName == pcatname) || "
+         + "(childTable != null && childTable.tableName == ctblname &&" +
+               "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " +
+           (constraintname != null ? " && constraintName == constraintname" : ""));
+       query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
+           + "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," +
+           "java.lang.String ccatname" +
+         (constraintname != null ? ", java.lang.String constraintname" : ""));
+       Collection<?> constraintNamesColl =
+         constraintname != null ?
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)):
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName));
+       for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
+         String currName = (String) i.next();
+         constraintNames.add(currName);
+       }
+       query = pm.newQuery(MConstraint.class);
+       query.setFilter("param.contains(constraintName)");
+       query.declareParameters("java.util.Collection param");
+       Collection<?> constraints = (Collection<?>)query.execute(constraintNames);
+       mConstraints = new ArrayList<>();
+       for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
+         MConstraint currConstraint = (MConstraint) i.next();
+         mConstraints.add(currConstraint);
+       }
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+     return mConstraints;
+   }
+ 
++  private static String getFullyQualifiedTableName(String dbName, String tblName) {
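++    // Illustrative result: ("db1", "tbl1") -> "db1"."tbl1"; a null or empty dbName yields just "tbl1".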
++    return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
++        + "\"" + tblName + "\"";
++  }
++
++  @Override
++  public Table getTable(String catName, String dbName, String tableName)
++      throws MetaException {
++    return getTable(catName, dbName, tableName, -1, null);
++  }
++
+   @Override
 -  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
++  public Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     boolean commited = false;
+     Table tbl = null;
+     try {
+       openTransaction();
 -      tbl = convertToTable(getMTable(catName, dbName, tableName));
++      MTable mtable = getMTable(catName, dbName, tableName);
++      tbl = convertToTable(mtable);
+       // Retrieve creation metadata if needed
+       if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+         tbl.setCreationMetadata(
 -            convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++                convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++      }
++
++      // If this is a transactional, non-partitioned table, check whether the current version
++      // of the table statistics in the metastore complies with the client query's snapshot
++      // isolation.
++      // Note: a partitioned table keeps its table stats and table snapshot in MPartition.
++      if (writeIdList != null) {
++        boolean isTxn = tbl != null && TxnUtils.isTransactionalTable(tbl);
++        if (isTxn && !areTxnStatsSupported) {
++          StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
++          LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
++        } else if (isTxn && tbl.getPartitionKeysSize() == 0) {
++          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList, false)) {
++            tbl.setIsStatsCompliant(true);
++          } else {
++            tbl.setIsStatsCompliant(false);
++            // Do not make persistent the following state since it is the query specific (not global).
++            StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
++            LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
++          }
++        }
+       }
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return tbl;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException {
+     return getTables(catName, dbName, pattern, null);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     try {
+       // We only support pattern matching via jdo since pattern matching in Java
+       // might be different than the one used by the metastore backends
+       return getTablesInternal(catName, dbName, pattern, tableType,
+           (pattern == null || pattern.equals(".*")), true);
+     } catch (NoSuchObjectException e) {
+       throw new MetaException(ExceptionUtils.getStackTrace(e));
+     }
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getTableNamesWithStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("UnsupportedOperationException"); // TODO: implement?
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException {
+     return new GetHelper<Map<String, List<String>>>(catName, dbName, null, true, false) {
+       @Override
+       protected Map<String, List<String>> getSqlResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         try {
+           return directSql.getColAndPartNamesWithStats(catName, dbName, tableName);
+         } catch (Throwable ex) {
+           LOG.error("DirectSQL failed", ex);
+           throw new MetaException(ex.getMessage());
+         }
+       }
+ 
+       @Override
+       protected Map<String, List<String>> getJdoResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("UnsupportedOperationException"); // TODO: implement?
+       }
+ 
+       @Override
+       protected String describeResult() {
+         return results.size() + " partitions";
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getAllTableNamesForStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         boolean commited = false;
+         Query query = null;
+         List<TableName> result = new ArrayList<>();
+         openTransaction();
+         try {
+           String paramStr = "", whereStr = "";
+           for (int i = 0; i < MetaStoreDirectSql.STATS_TABLE_TYPES.length; ++i) {
+             if (i != 0) {
+               paramStr += ", ";
+               whereStr += "||";
+             }
+             paramStr += "java.lang.String tt" + i;
+             whereStr += " tableType == tt" + i;
+           }
+           query = pm.newQuery(MTable.class, whereStr);
+           query.declareParameters(paramStr);
+           @SuppressWarnings("unchecked")
+           Collection<MTable> tbls = (Collection<MTable>) query.executeWithArray(
+               query, MetaStoreDirectSql.STATS_TABLE_TYPES);
+           pm.retrieveAll(tbls);
+           for (MTable tbl : tbls) {
+             result.add(new TableName(
+                 tbl.getDatabase().getCatalogName(), tbl.getDatabase().getName(), tbl.getTableName()));
+           }
+           commited = commitTransaction();
+         } finally {
+           rollbackAndCleanup(commited, query);
+         }
+         return result;
+       }
+     }.run(false);
+   }
+ 
+   protected List<String> getTablesInternal(String catName, String dbName, String pattern,
+                                            TableType tableType, boolean allowSql, boolean allowJdo)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     final String cat_name = normalizeIdentifier(catName);
+     return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
+       @Override
+       protected List<String> getSqlResult(GetHelper<List<String>> ctx)
+               throws MetaException {
+         return directSql.getTables(cat_name, db_name, tableType);
+       }
+ 
+       @Override
+       protected List<String> getJdoResult(GetHelper<List<String>> ctx)
+               throws MetaException, NoSuchObjectException {
+         return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
+       }
+     }.run(false);
+   }
+ 
+   private List<String> getTablesInternalViaJdo(String catName, String dbName, String pattern,
+                                                TableType tableType) throws MetaException {
+     boolean commited = false;
+     Query query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       dbName = normalizeIdentifier(dbName);
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       List<String> parameterVals = new ArrayList<>();
+       StringBuilder filterBuilder = new StringBuilder();
+       //adds database.name == dbName to the filter
+       appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals);
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if(pattern != null) {
+         appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals);
+       }
+       if(tableType != null) {
+         appendPatternCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals);
+       }
+ 
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       query.setResult("tableName");
+       query.setOrdering("tableName ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean commited = false;
+     Query<?> query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       dbName = normalizeIdentifier(dbName);
+       query = pm.newQuery(MTable.class,
+           "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re");
+       query.declareParameters(
+           "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re");
+       query.setResult("tableName");
+       Collection<String> names = (Collection<String>) query.executeWithArray(
+           db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true);
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return getObjectCount("name", MDatabase.class.getName());
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return getObjectCount("partitionName", MPartition.class.getName());
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return getObjectCount("tableName", MTable.class.getName());
+   }
+ 
+   private int getObjectCount(String fieldName, String objName) {
+     Long result = 0L;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       String queryStr =
+         "select count(" + fieldName + ") from " + objName;
+       query = pm.newQuery(queryStr);
+       result = (Long) query.execute();
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return result.intValue();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+ 
+     boolean commited = false;
+     Query query = null;
+     List<TableMeta> metas = new ArrayList<>();
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>();
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if (dbNames != null && !dbNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals);
+       }
+       if (tableNames != null && !tableNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals);
+       }
+       if (tableTypes != null && !tableTypes.isEmpty()) {
+         appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals);
+       }
+ 
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " +
+             StringUtils.join(parameterVals, ", "));
+       }
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       Collection<MTable> tables = (Collection<MTable>) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+       for (MTable table : tables) {
+         TableMeta metaData = new TableMeta(
+             table.getDatabase().getName(), table.getTableName(), table.getTableType());
+         metaData.setComments(table.getParameters().get("comment"));
+         metas.add(metaData);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return metas;
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String fieldName,
+       String[] elements, List<String> parameterVals) {
+     return appendCondition(filterBuilder, fieldName, elements, true, parameterVals);
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder builder,
+       String fieldName, String elements, List<String> parameters) {
+     elements = normalizeIdentifier(elements);
+     return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters);
+   }
+ 
+   private StringBuilder appendSimpleCondition(StringBuilder builder,
+       String fieldName, String[] elements, List<String> parameters) {
+     return appendCondition(builder, fieldName, elements, false, parameters);
+   }
+ 
+   private StringBuilder appendCondition(StringBuilder builder,
+       String fieldName, String[] elements, boolean pattern, List<String> parameters) {
+     if (builder.length() > 0) {
+       builder.append(" && ");
+     }
+     builder.append(" (");
+     int length = builder.length();
+     for (String element : elements) {
+       if (pattern) {
+         element = "(?i)" + element.replaceAll("\\*", ".*");
+       }
+       parameters.add(element);
+       if (builder.length() > length) {
+         builder.append(" || ");
+       }
+       builder.append(fieldName);
+       if (pattern) {
+         builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
+       } else {
+         builder.append(" == ").append(JDO_PARAM).append(parameters.size());
+       }
+     }
+     builder.append(" )");
+     return builder;
+   }
+ 
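
appendPatternCondition and appendCondition above are what turn a Hive-style name pattern (segments separated by '|', '*' as a wildcard) into a case-insensitive JDOQL matches() clause plus positional parameter values. The runnable snippet below re-implements only that string handling; the ":param" prefix is assumed to correspond to ObjectStore's JDO_PARAM constant, and toLowerCase() approximates normalizeIdentifier().

    import java.util.ArrayList;
    import java.util.List;

    public class PatternFilterDemo {
      // Assumed stand-in for ObjectStore.JDO_PARAM, used only to make the output readable.
      private static final String JDO_PARAM = ":param";

      // Mirrors appendCondition(): one clause per '|' segment, '*' rewritten to the regex
      // '.*', case-insensitive matching via the '(?i)' flag on the parameter value.
      static String patternFilter(String fieldName, String pattern, List<String> parameters) {
        StringBuilder builder = new StringBuilder(" (");
        int length = builder.length();
        for (String element : pattern.toLowerCase().split("\\|")) {
          element = "(?i)" + element.replaceAll("\\*", ".*");
          parameters.add(element);
          if (builder.length() > length) {
            builder.append(" || ");
          }
          builder.append(fieldName)
                 .append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
        }
        return builder.append(" )").toString();
      }

      public static void main(String[] args) {
        List<String> params = new ArrayList<>();
        String filter = patternFilter("tableName", "web_*|dim_dates", params);
        System.out.println(filter); //  (tableName.matches(:param1) || tableName.matches(:param2) )
        System.out.println(params); // [(?i)web_.*, (?i)dim_dates]
      }
    }
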
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return getTables(catName, dbName, ".*");
+   }
+ 
+   class AttachedMTableInfo {
+     MTable mtbl;
+     MColumnDescriptor mcd;
+ 
+     public AttachedMTableInfo() {}
+ 
+     public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) {
+       this.mtbl = mtbl;
+       this.mcd = mcd;
+     }
+   }
+ 
+   private AttachedMTableInfo getMTable(String catName, String db, String table,
+                                        boolean retrieveCD) {
+     AttachedMTableInfo nmtbl = new AttachedMTableInfo();
+     MTable mtbl = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catName = normalizeIdentifier(catName);
+       db = normalizeIdentifier(db);
+       table = normalizeIdentifier(table);
+       query = pm.newQuery(MTable.class,
+           "tableName == table && database.name == db && database.catalogName == catname");
+       query.declareParameters(
+           "java.lang.String table, java.lang.String db, java.lang.String catname");
+       query.setUnique(true);
+       LOG.debug("Executing getMTable for " +
+           TableName.getQualified(catName, db, table));
+       mtbl = (MTable) query.execute(table, db, catName);
+       pm.retrieve(mtbl);
+       // Retrieving CD can be expensive and unnecessary, so do it only when required.
+       if (mtbl != null && retrieveCD) {
+         pm.retrieve(mtbl.getSd());
+         pm.retrieveAll(mtbl.getSd().getCD());
+         nmtbl.mcd = mtbl.getSd().getCD();
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     nmtbl.mtbl = mtbl;
+     return nmtbl;
+   }
+ 
+   private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) {
+     boolean commited = false;
+     MCreationMetadata mcm = null;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(
+           MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat");
+       query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat");
+       query.setUnique(true);
+       mcm = (MCreationMetadata) query.execute(tblName, dbName, catName);
+       pm.retrieve(mcm);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return mcm;
+   }
+ 
+   private MTable getMTable(String catName, String db, String table) {
+     AttachedMTableInfo nmtbl = getMTable(catName, db, table, false);
+     return nmtbl.mtbl;
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names)
+       throws MetaException, UnknownDBException {
+     List<Table> tables = new ArrayList<>();
+     boolean committed = false;
+     Query dbExistsQuery = null;
+     Query query = null;
+     try {
+       openTransaction();
+       db = normalizeIdentifier(db);
+       catName = normalizeIdentifier(catName);
+ 
+       List<String> lowered_tbl_names = new ArrayList<>(tbl_names.size());
+       for (String t : tbl_names) {
+         lowered_tbl_names.add(normalizeIdentifier(t));
+       }
+       query = pm.newQuery(MTable.class);
+       query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)");
+       query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names");
+       Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names);
+       if (mtables == null || mtables.isEmpty()) {
+         // Need to differentiate between an unmatched pattern and a non-existent database
+         dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat");
+         dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat");
+         dbExistsQuery.setUnique(true);
+         dbExistsQuery.setResult("name");
+         String dbNameIfExists = (String) dbExistsQuery.execute(db, catName);
+         if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) {
+           throw new UnknownDBException("Could not find database " +
+               DatabaseName.getQualified(catName, db));
+         }
+       } else {
+         for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
+           Table tbl = convertToTable((MTable) iter.next());
+           // Retrieve creation metadata if needed
+           if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+             tbl.setCreationMetadata(
+                 convertToCreationMetadata(
+                     getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())));
+           }
+           tables.add(tbl);
+         }
+       }
+       committed = commitTransaction();
+     } finally {
+       rollbackAndCleanup(committed, query);
+       if (dbExistsQuery != null) {
+         dbExistsQuery.closeAll();
+       }
+     }
+     return tables;
+   }
+ 
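
When the contains() query in getTableObjectsByName comes back empty, the code above runs a second, cheaper query purely to decide which error is appropriate: an empty list (database exists, names don't) or UnknownDBException (the database itself is missing). The same two-step disambiguation, sketched against an in-memory map instead of JDO, purely to illustrate the control flow:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LookupDisambiguationDemo {
      static final Map<String, List<String>> TABLES_BY_DB = new HashMap<>();
      static {
        TABLES_BY_DB.put("sales", Arrays.asList("orders", "customers"));
      }

      static List<String> getTables(String db, List<String> wanted) {
        List<String> inDb = TABLES_BY_DB.getOrDefault(db, Collections.emptyList());
        List<String> result = new ArrayList<>(wanted);
        result.retainAll(inDb);
        // Only pay for the existence check when the lookup came back empty, so the
        // common "tables found" case needs a single query.
        if (result.isEmpty() && !TABLES_BY_DB.containsKey(db)) {
          throw new IllegalArgumentException("Could not find database " + db);
        }
        return result;
      }

      public static void main(String[] args) {
        System.out.println(getTables("sales", Arrays.asList("orders", "missing"))); // [orders]
        System.out.println(getTables("sales", Arrays.asList("missing")));           // []
        try {
          getTables("does_not_exist", Arrays.asList("orders"));
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage()); // Could not find database does_not_exist
        }
      }
    }
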
+   /** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */
+   private <T> List<T> convertList(List<T> dnList) {
+     return (dnList == null) ? null : Lists.newArrayList(dnList);
+   }
+ 
+   /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */
+   private Map<String, String> convertMap(Map<String, String> dnMap) {
+     return MetaStoreUtils.trimMapNulls(dnMap,
+         MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS));
+   }
+ 
+   private Table convertToTable(MTable mtbl) throws MetaException {
+     if (mtbl == null) {
+       return null;
+     }
+     String tableType = mtbl.getTableType();
+     if (tableType == null) {
+       // for backwards compatibility with old metastore persistence
+       if (mtbl.getViewOriginalText() != null) {
+         tableType = TableType.VIRTUAL_VIEW.toString();
+       } else if (Boolean.parseBoolean(mtbl.getParameters().get("EXTERNAL"))) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       } else {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
+         .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
+         .getRetention(), convertToStorageDescriptor(mtbl.getSd()),
+         convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
+         mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
+ 
+     if (Strings.isNullOrEmpty(mtbl.getOwnerType())) {
+       // Before ownerType existed in the Hive schema, USER was the default owner type.
+       // Default to USER here to keep backward compatibility.
+       t.setOwnerType(PrincipalType.USER);
+     } else {
+       t.setOwnerType(PrincipalType.valueOf(mtbl.getOwnerType()));
+     }
+ 
+     t.setRewriteEnabled(mtbl.isRewriteEnabled());
+     t.setCatName(mtbl.getDatabase().getCatalogName());
++    t.setWriteId(mtbl.getWriteId());
+     return t;
+   }
+ 
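
The backward-compatibility branch in convertToTable handles rows written before the table type was persisted: a stored view definition means VIRTUAL_VIEW, an EXTERNAL=true table property means EXTERNAL_TABLE, and everything else is treated as MANAGED_TABLE (a missing owner type is similarly defaulted to USER). The inference on its own, as a runnable snippet with plain strings standing in for the metastore model and enum types:

    public class LegacyTableTypeDemo {
      // Mirrors the branch above; only consulted when the persisted tableType is null.
      static String inferTableType(String viewOriginalText, String externalParam) {
        if (viewOriginalText != null) {
          return "VIRTUAL_VIEW";
        } else if (Boolean.parseBoolean(externalParam)) {
          return "EXTERNAL_TABLE";
        } else {
          return "MANAGED_TABLE";
        }
      }

      public static void main(String[] args) {
        System.out.println(inferTableType("SELECT * FROM t", null)); // VIRTUAL_VIEW
        System.out.println(inferTableType(null, "TRUE"));            // EXTERNAL_TABLE
        System.out.println(inferTableType(null, null));              // MANAGED_TABLE
      }
    }
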
+   private MTable convertToMTable(Table tbl) throws InvalidObjectException,
+       MetaException {
++    // NOTE: we don't set writeId in this method. Write ID is only set after validating the
++    //       existing write ID against the caller's valid list.
+     if (tbl == null) {
+       return null;
+     }
+     MDatabase mdb = null;
+     String catName = tbl.isSetCatName() ? tbl.getCatName() : getDefaultCatalog(conf);
+     try {
+       mdb = getMDatabase(catName, tbl.getDbName());
+     } catch (NoSuchObjectException e) {
+       LOG.error("Could not convert to MTable", e);
+       throw new InvalidObjectException("Database " +
+           DatabaseName.getQualified(catName, tbl.getDbName()) + " doesn't exist.");
+     }
+ 
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+ 
+     PrincipalType ownerPrincipalType = tbl.getOwnerType();
+     String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();
+ 
+     // A new table is always created with a new column descriptor
 -    return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
++    MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
+         convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
+         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
+         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
+         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
+         tableType);
++    return mtable;
+   }
+ 
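
Going the other way, convertToMTable reconciles the declared table type with the EXTERNAL table property in both directions (a MANAGED_TABLE carrying EXTERNAL=true becomes EXTERNAL_TABLE, an EXTERNAL_TABLE without the property falls back to MANAGED_TABLE) and, per the NOTE above, deliberately leaves the write ID unset until it has been validated against the caller's valid write-id list. The reconciliation rule in isolation, with plain strings instead of the TableType enum:

    public class ExternalFlagReconciliationDemo {
      // Mirrors the type/EXTERNAL-property reconciliation in convertToMTable().
      static String reconcile(String declaredType, boolean externalProperty) {
        if ("MANAGED_TABLE".equals(declaredType) && externalProperty) {
          return "EXTERNAL_TABLE";
        }
        if ("EXTERNAL_TABLE".equals(declaredType) && !externalProperty) {
          return "MANAGED_TABLE";
        }
        return declaredType;
      }

      public static void main(String[] args) {
        System.out.println(reconcile("MANAGED_TABLE", true));   // EXTERNAL_TABLE
        System.out.println(reconcile("EXTERNAL_TABLE", false)); // MANAGED_TABLE
        System.out.println(reconcile("VIRTUAL_VIEW", false));   // VIRTUAL_VIEW
      }
    }
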
+   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
+     List<MFieldSchema> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (FieldSchema part : keys) {
+         mkeys.add(new MFieldSchema(part.getName().toLowerCase(),
+             part.getType(), part.getComment()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<FieldSchema> convertToFieldSchemas(List<MFieldSchema> mkeys) {
+     List<FieldSchema> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MFieldSchema part : mkeys) {
+         keys.add(new FieldSchema(part.getName(), part.getType(), part
+             .getComment()));
+       }
+     }
+     return keys;
+   }
+ 
+   private List<MOrder> convertToMOrders(List<Order> keys) {
+     List<MOrder> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (Order part : keys) {
+         mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<Order> convertToOrders(List<MOrder> mkeys) {
+     List<Order> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MOrder part : mkeys) {
+         keys.add(new Order(part.getCol(), part.getOrder()));
+       }
+     }
+     return keys;
+   }
+ 
+   private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     SerDeInfo serde =
+         new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
+     if (ms.getDescription() != null) {
+       serde.setDescription(ms.getDescription());
+     }
+     if (ms.getSerializerClass() != null) {
+       serde.setSerializerClass(ms.getSerializerClass());
+     }
+     if (ms.getDeserializerClass() != null) {
+       serde.setDeserializerClass(ms.getDeserializerClass());
+     }
+     if (ms.getSerdeType() > 0) {
+       serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+     }
+     return serde;
+   }
+ 
+   private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms.getParameters(),
+         ms.getDescription(), ms.getSerializerClass(), ms.getDeserializerClass(),
+         ms.getSerdeType() == null ? 0 : ms.

<TRUNCATED>

[08/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 0000000,9b79446..8270f6a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@@ -1,0 -1,1212 +1,1249 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.ISchemaName;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ 
+ import java.nio.ByteBuffer;
+ import java.util.Collections;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.thrift.TException;
+ import org.junit.Assert;
+ 
+ /**
+  *
+  * DummyRawStoreForJdoConnection.
+  *
+  * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
+  * applied by the time this class's setConf method is called, by checking that the value of
+  * the CONNECT_URL_KEY ConfVar has been updated.
+  *
+  * All non-void methods return default values.
+  */
+ public class DummyRawStoreForJdoConnection implements RawStore {
+ 
+   @Override
+   public Configuration getConf() {
+ 
+     return null;
+   }
+ 
+   @Override
+   public void setConf(Configuration arg0) {
+     String expected = DummyJdoConnectionUrlHook.newUrl;
+     String actual = MetastoreConf.getVar(arg0, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+ 
+     Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
+         " did not match the actual value when the Raw Store was initialized: " + actual,
+         expected, actual);
+   }
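
This setConf override is the whole point of the dummy store: it fails the test unless the JDO connection-URL hook has already rewritten the connect URL by the time the RawStore is configured. The "hook must run before the consumer sees the config" check, reduced to a plain-Java sketch with a map standing in for the Hadoop Configuration and hypothetical hook/consumer methods (not the actual Hive classes):

    import java.util.HashMap;
    import java.util.Map;

    public class HookBeforeConsumerDemo {
      static final String CONNECT_URL_KEY = "javax.jdo.option.ConnectionURL";
      static final String HOOKED_URL = "jdbc:derby:memory:hooked;create=true"; // illustrative value

      // Stand-in for a connection-URL hook: rewrites the URL in place.
      static void applyHook(Map<String, String> conf) {
        conf.put(CONNECT_URL_KEY, HOOKED_URL);
      }

      // Stand-in for DummyRawStoreForJdoConnection.setConf(): refuses to proceed
      // unless the hook has already updated the URL.
      static void setConf(Map<String, String> conf) {
        String actual = conf.get(CONNECT_URL_KEY);
        if (!HOOKED_URL.equals(actual)) {
          throw new AssertionError("Expected hooked URL " + HOOKED_URL + " but got " + actual);
        }
      }

      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put(CONNECT_URL_KEY, "jdbc:derby:memory:original;create=true");
        applyHook(conf); // comment this call out and setConf() fails
        setConf(conf);
        System.out.println("hook verified before the store was configured");
      }
    }
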
+ 
+   @Override
+   public void shutdown() {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+ 
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String name) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException,
+       MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Table getTable(String catalogName, String dbName, String tableName,
++                        long txnid, String writeIdList) throws MetaException {
++    return null;
++  }
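
The dummy simply returns null here, but the new overload is the interesting part of this hunk: several RawStore reads (getTable, getPartition, the column-statistics getters and get_aggr_stats_for below) now also take a transaction id and a serialized valid write-id list, so an implementation can answer consistently with the caller's snapshot. A store that keeps no per-table write IDs could plausibly satisfy the overload by ignoring the snapshot hints and delegating to the plain lookup; a sketch of such a fragment (one possible choice, not the actual Hive implementation), meant to sit inside a RawStore implementation that already defines the three-argument getTable:

      @Override
      public Table getTable(String catName, String dbName, String tableName,
          long txnid, String writeIdList) throws MetaException {
        // No write-id tracking in this store, so there is no snapshot to enforce;
        // ignore the hints and fall back to the non-transactional lookup.
        return getTable(catName, dbName, tableName);
      }
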
++
++  @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
++                                long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return Collections.emptyMap();
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbname, String name, Table newTable)
++  public void alterTable(String catName, String dbname, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter, short max_tables)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name, short max_parts)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+                                                      String tbl_name, List<FieldSchema> cols,
+                                                      boolean applyDistinct, String filter,
+                                                      boolean ascending, List<FieldSchema> order,
+                                                      long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException {
++      Partition new_part, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String db_name, String tbl_name,
 -                              List<List<String>> part_vals_list, List<Partition> new_parts)
 -      throws InvalidObjectException, MetaException {
 -
 -
++                              List<List<String>> part_vals_list, List<Partition> new_parts,
++                              long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+                                                String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return false;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String, String> partVals,
+       PartitionEventType evtType) throws MetaException, UnknownTableException,
+       InvalidPartitionException, UnknownPartitionException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType) throws MetaException,
+       UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+       PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException,
+       MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partVals,
+       String partName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return null;
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List<String> partVals,
+       String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts,
+       String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name, List<String> part_vals,
+       short max_parts) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+ 
+     return 0;
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) {
+     return 0;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key) {
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public  ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colName) throws MetaException, NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tableName, List<String> colName,
++      long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+     String partName, List<String> partVals, String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException,
+     InvalidInputException {
+     return false;
+ 
+   }
+ 
+   @Override
 -  public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
++  public boolean updateTableColumnStatistics(ColumnStatistics statsObj,
++      long txnId, String validWriteIds, long writeId)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
 -  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,List<String> partVals)
++  public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List<String> partVals,
++      long txnId, String validWriteIds, long writeId)
+     throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return Collections.emptyList();
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames) {
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+           throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+ 
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return null;
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void flushCache() {
+ 
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+   String constraintName, boolean missingOk) throws NoSuchObjectException {
+     // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(
+       WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize) throws MetaException {
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(
+       String name, WMNullableResourcePlan resourcePlan, boolean canActivateDisabled, boolean canDeactivate,
+       boolean isReplace)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger) throws MetaException {
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public List<MetaStoreUtils.ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return null;
+   }
+ }


[16/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 0000000,9dd3787..7fd0642
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@@ -1,0 -1,5051 +1,5094 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.io.PrintWriter;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.SQLFeatureNotSupportedException;
+ import java.sql.Savepoint;
+ import java.sql.Statement;
+ import java.time.Instant;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.SortedSet;
+ import java.util.TreeSet;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.Semaphore;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.sql.DataSource;
+ 
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.NotImplementedException;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.DatabaseProduct;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+ import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.StringableMap;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  *
+  * Note on log messages:  Please include txnid:X and lockid info using
+  * {@link JavaUtils#txnIdToString(long)}
+  * and {@link JavaUtils#lockIdToString(long)} in all messages.
+  * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated,
+  * so keeping the format consistent makes grep'ing the logs much easier.
+  *
+  * Note on HIVE_LOCKS.hl_last_heartbeat.
+  * For locks that are part of a transaction, we set this to 0 (we would rather set it to NULL, but
+  * currently the DB schema has this NOT NULL) and only update/read the heartbeat from the corresponding
+  * transaction in TXNS.
+  *
+  * In general there can be multiple metastores where this logic can execute, thus the DB is
+  * used to ensure proper mutexing of operations.
+  * Select ... For Update (or equivalent: either MsSql with(updlock) or actual Update stmt) is
+  * used to properly sequence operations.  Most notably:
+  * 1. various sequence IDs are generated with aid of this mutex
+  * 2. ensuring that each (Hive) Transaction state is transitioned atomically.  Transaction state
+  *  includes its actual state (Open, Aborted) as well as its lock list/component list.  Thus all
+  *  per-transaction ops either start by update/delete of the relevant TXNS row or do S4U on that row.
+  *  This allows almost all operations to run at READ_COMMITTED and minimizes DB deadlocks.
+  * 3. checkLock() - this is mutexed entirely since we must ensure that while we check if some lock
+  *  can be granted, no other (strictly speaking "earlier") lock can change state.
+  *
+  * The exception to this is Derby, which doesn't support proper S4U.  Derby always runs embedded
+  * (this is the only supported configuration for Derby)
+  * in the same JVM as HiveMetaStoreHandler, thus we use a JVM-wide lock to properly sequence the operations.
+  *
+  * {@link #derbyLock}
+  *
+  * If we ever decide to run remote Derby server, according to
+  * https://db.apache.org/derby/docs/10.0/manuals/develop/develop78.html all transactions will be
+  * serialized, so that would also work, though it has not been tested.
+  *
+  * General design note:
+  * It's imperative that any operation on a txn (e.g. commit) ensures (atomically) that this txn is
+  * still valid and active.  In the code this is usually achieved at the same time the txn record
+  * is locked for some operation.
+  * 
+  * Note on retry logic:
+  * Metastore has retry logic in both {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient}
+  * and {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}.  The retry logic there is very
+  * generic and is not aware whether the operations are idempotent or not.  (This is separate from
+  * retry logic here in TxnHandler which can/does retry DB errors intelligently).  The worst case is
+  * when an op here issues a successful commit against the RDBMS but the calling stack doesn't
+  * receive the ack and retries.  (If an op fails before commit, it's trivially idempotent)
+  * Thus the ops here need to be made idempotent as much as possible or
+  * the metastore call stack should have logic not to retry.  There are {@link RetrySemantics}
+  * annotations to document the behavior.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
+ 
+   static final protected char INITIATED_STATE = 'i';
+   static final protected char WORKING_STATE = 'w';
+   static final protected char READY_FOR_CLEANING = 'r';
+   static final char FAILED_STATE = 'f';
+   static final char SUCCEEDED_STATE = 's';
+   static final char ATTEMPTED_STATE = 'a';
+ 
+   // Compactor types
+   static final protected char MAJOR_TYPE = 'a';
+   static final protected char MINOR_TYPE = 'i';
+ 
+   // Transaction states
+   static final protected char TXN_ABORTED = 'a';
+   static final protected char TXN_OPEN = 'o';
+   //todo: make these like OperationType and remove above char constants
+   enum TxnStatus {OPEN, ABORTED, COMMITTED, UNKNOWN}
+ 
+   public enum TxnType {
+     DEFAULT(0), REPL_CREATED(1), READ_ONLY(2);
+ 
+     private final int value;
+     TxnType(int value) {
+       this.value = value;
+     }
+ 
+     public int getValue() {
+       return value;
+     }
+   }
+ 
+   // Lock states
+   static final protected char LOCK_ACQUIRED = 'a';
+   static final protected char LOCK_WAITING = 'w';
+ 
+   // Lock types
+   static final protected char LOCK_EXCLUSIVE = 'e';
+   static final protected char LOCK_SHARED = 'r';
+   static final protected char LOCK_SEMI_SHARED = 'w';
+ 
+   static final private int ALLOWED_REPEATED_DEADLOCKS = 10;
+   static final private Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
+ 
+   static private DataSource connPool;
+   private static DataSource connPoolMutex;
+   static private boolean doRetryOnConnPool = false;
+ 
+   private List<TransactionalMetaStoreEventListener> transactionalListeners;
+   
+   private enum OpertaionType {
+     SELECT('s'), INSERT('i'), UPDATE('u'), DELETE('d');
+     private final char sqlConst;
+     OpertaionType(char sqlConst) {
+       this.sqlConst = sqlConst;
+     }
+     public String toString() {
+       return Character.toString(sqlConst);
+     }
+     public static OpertaionType fromString(char sqlConst) {
+       switch (sqlConst) {
+         case 's':
+           return SELECT;
+         case 'i':
+           return INSERT;
+         case 'u':
+           return UPDATE;
+         case 'd':
+           return DELETE;
+         default:
+           throw new IllegalArgumentException(quoteChar(sqlConst));
+       }
+     }
+     public static OpertaionType fromDataOperationType(DataOperationType dop) {
+       switch (dop) {
+         case SELECT:
+           return OpertaionType.SELECT;
+         case INSERT:
+           return OpertaionType.INSERT;
+         case UPDATE:
+           return OpertaionType.UPDATE;
+         case DELETE:
+           return OpertaionType.DELETE;
+         default:
+           throw new IllegalArgumentException("Unexpected value: " + dop);
+       }
+     }
+   }
+ 
+   // Maximum number of open transactions that's allowed
+   private static volatile int maxOpenTxns = 0;
+   // Whether number of open transactions reaches the threshold
+   private static volatile boolean tooManyOpenTxns = false;
+ 
+   /**
+    * Number of consecutive deadlocks we have seen
+    */
+   private int deadlockCnt;
+   private long deadlockRetryInterval;
+   protected Configuration conf;
+   private static DatabaseProduct dbProduct;
+   private static SQLGenerator sqlGenerator;
+ 
+   // (End user) Transaction timeout, in milliseconds.
+   private long timeout;
+ 
+   private String identifierQuoteString; // quotes to use for quoting tables, where necessary
+   private long retryInterval;
+   private int retryLimit;
+   private int retryNum;
+   // Current number of open txns
+   private AtomicInteger numOpenTxns;
+ 
+   /**
+    * Derby specific concurrency control
+    */
+   private static final ReentrantLock derbyLock = new ReentrantLock(true);
+   /**
+    * must be static since even in UT there may be > 1 instance of TxnHandler
+    * (e.g. via Compactor services)
+    */
+   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
+   private static final String hostname = JavaUtils.hostname();
+ 
+   // Private methods should never catch SQLException and then throw MetaException.  The public
+   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
+   // methods should only throw MetaException when they explicitly know there's a logic error and
+   // they want to throw past the public methods.
+   //
+   // All public methods that write to the database have to check for deadlocks when a SQLException
+   // comes back and handle it if they see one.  This has to be done with the connection pooling
+   // in mind.  To do this they should call checkRetryable() AFTER rolling back the db transaction,
+   // and then they should catch RetryException and call themselves recursively. See commitTxn for an example.
+ 
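The retry convention described in the comment above is easier to see stripped of the JDBC details. The following is a minimal, self-contained sketch of just that shape; doOnce(), isTransient() and the nested RetryException here are illustrative stand-ins rather than members of TxnHandler, and the rollback step is elided:

    public class RetryShapeSketch {
      static class RetryException extends Exception {}        // stand-in for TxnHandler's RetryException

      static int attemptsLeft = 3;

      // One attempt at a DB-backed operation; fails transiently twice, then succeeds.
      static void doOnce() throws java.sql.SQLException {
        if (attemptsLeft-- > 1) {
          throw new java.sql.SQLException("transient failure");
        }
        System.out.println("committed");
      }

      // Stand-in for checkRetryable(): decide whether the SQLException is worth retrying.
      static boolean isTransient(java.sql.SQLException e) {
        return true;
      }

      // Shape of a public TxnStore method: classify the failure, then retry by re-entering.
      static void operation() throws java.sql.SQLException {
        try {
          try {
            doOnce();
          } catch (java.sql.SQLException e) {
            if (isTransient(e)) {             // in TxnHandler this check happens AFTER rolling back
              throw new RetryException();
            }
            throw e;
          }
        } catch (RetryException e) {
          operation();                        // recursive retry, as commitTxn() and friends do
        }
      }

      public static void main(String[] args) throws Exception {
        operation();                          // prints "committed" after two transparent retries
      }
    }
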
+   public TxnHandler() {
+   }
+ 
+   /**
+    * This is logically part of c'tor and must be called prior to any other method.
+    * Not physically part of c'tor due to use of reflection
+    */
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+ 
+     checkQFileTestHack();
+ 
+     synchronized (TxnHandler.class) {
+       if (connPool == null) {
+         Connection dbConn = null;
+         // Set up the JDBC connection pool
+         try {
+           int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
+           long getConnectionTimeoutMs = 30000;
+           connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs);
+           /*the mutex pools should ideally be somewhat larger since some operations require 1
+            connection from each pool and we want to avoid taking a connection from primary pool
+            and then blocking because mutex pool is empty.  There is only 1 thread in any HMS trying
+            to mutex on each MUTEX_KEY except MUTEX_KEY.CheckLock.  The CheckLock operation gets a
+            connection from connPool first, then connPoolMutex.  All others, go in the opposite
+            order (not very elegant...).  So number of connection requests for connPoolMutex cannot
+            exceed (size of connPool + MUTEX_KEY.values().length - 1).*/
+           connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize + MUTEX_KEY.values().length, getConnectionTimeoutMs);
+           dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+           determineDatabaseProduct(dbConn);
+           sqlGenerator = new SQLGenerator(dbProduct, conf);
+         } catch (SQLException e) {
+           String msg = "Unable to instantiate JDBC connection pooling, " + e.getMessage();
+           LOG.error(msg);
+           throw new RuntimeException(e);
+         } finally {
+           closeDbConn(dbConn);
+         }
+       }
+     }
+ 
+     numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);
+ 
+     timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
+     buildJumpTable();
+     retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMS_HANDLER_INTERVAL,
+         TimeUnit.MILLISECONDS);
+     retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     deadlockRetryInterval = retryInterval / 10;
+     maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS);
+ 
+     try {
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(
+               TransactionalMetaStoreEventListener.class,
+                       conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+     } catch(MetaException e) {
+       String msg = "Unable to get transaction listeners, " + e.getMessage();
+       LOG.error(msg);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This method can run at READ_COMMITTED as long as
+          * {@link #openTxns(org.apache.hadoop.hive.metastore.api.OpenTxnRequest)} is atomic.
+          * More specifically, as long as advancing TransactionID in NEXT_TXN_ID is atomic with
+          * adding corresponding entries into TXNS.  The reason is that any txnid below HWM
+          * is either in TXNS and thus considered open (Open/Aborted) or it's considered Committed.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<TxnInfo> txnInfos = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " +
+             "TXNS where txn_id <= " + hwm;
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           char c = rs.getString(2).charAt(0);
+           TxnState state;
+           switch (c) {
+             case TXN_ABORTED:
+               state = TxnState.ABORTED;
+               break;
+ 
+             case TXN_OPEN:
+               state = TxnState.OPEN;
+               break;
+ 
+             default:
+               throw new MetaException("Unexpected transaction state " + c +
+                 " found in txns table");
+           }
+           TxnInfo txnInfo = new TxnInfo(rs.getLong(1), state, rs.getString(3), rs.getString(4));
+           txnInfo.setStartedTime(rs.getLong(5));
+           txnInfo.setLastHeartbeatTime(rs.getLong(6));
+           txnInfos.add(txnInfo);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return new GetOpenTxnsInfoResponse(hwm, txnInfos);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxnsInfo");
+         throw new MetaException("Unable to select from transaction database: " + getMessage(e)
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxnsInfo();
+     }
+   }
++
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsResponse getOpenTxns() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<Long> openList = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         long minOpenTxn = Long.MAX_VALUE;
+         BitSet abortedBits = new BitSet();
+         while (rs.next()) {
+           long txnId = rs.getLong(1);
+           openList.add(txnId);
+           char c = rs.getString(2).charAt(0);
+           if(c == TXN_OPEN) {
+             minOpenTxn = Math.min(minOpenTxn, txnId);
+           } else if (c == TXN_ABORTED) {
+             abortedBits.set(openList.size() - 1);
+           }
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+         GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
+         if(minOpenTxn < Long.MAX_VALUE) {
+           otr.setMin_open_txn(minOpenTxn);
+         }
+         return otr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxns");
+         throw new MetaException("Unable to select from transaction database, "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxns();
+     }
+   }
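The aborted flags in GetOpenTxnsResponse are positional: bit i of the BitSet built in the loop above corresponds to the i-th entry of the open-transaction list produced by the same loop. Below is a minimal, self-contained sketch of that encoding and of the decoding a reader of the response could perform (plain java.util only; no Thrift accessor names are assumed):

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.BitSet;
    import java.util.List;

    public class AbortedBitsSketch {
      public static void main(String[] args) {
        // Producer side, mirroring the loop above: txns 5, 6, 7 are <= HWM and txn 6 is aborted.
        List<Long> openList = Arrays.asList(5L, 6L, 7L);
        BitSet abortedBits = new BitSet();
        abortedBits.set(1);                                    // index 1 == position of txn 6 in openList
        ByteBuffer onTheWire = ByteBuffer.wrap(abortedBits.toByteArray());

        // Consumer side: positions in the decoded BitSet line up with positions in the open list.
        BitSet decoded = BitSet.valueOf(onTheWire);
        for (int i = 0; i < openList.size(); i++) {
          System.out.println("txn " + openList.get(i) + " aborted=" + decoded.get(i));
        }
      }
    }
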
+ 
+   /**
+    * Retry-by-caller note:
+    * Worst case, it will leave an open txn which will time out.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
+     if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) {
+       tooManyOpenTxns = true;
+     }
+     if (tooManyOpenTxns) {
+       if (numOpenTxns.get() < maxOpenTxns * 0.9) {
+         tooManyOpenTxns = false;
+       } else {
+         LOG.warn("Maximum allowed number of open transactions (" + maxOpenTxns + ") has been " +
+             "reached. Current number of open transactions: " + numOpenTxns);
+         throw new MetaException("Maximum allowed number of open transactions has been reached. " +
+             "See hive.max.open.txns.");
+       }
+     }
+ 
+     int numTxns = rqst.getNum_txns();
+     if (numTxns <= 0) {
+       throw new MetaException("Invalid input for number of txns: " + numTxns);
+     }
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         /**
+          * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure
+          * that advancing the counter in NEXT_TXN_ID and adding appropriate entries to TXNS is atomic.
+          * Also, advancing the counter must work when multiple metastores are running.
+          * SELECT ... FOR UPDATE is used to prevent
+          * concurrent DB transactions being rolled back due to Write-Write conflict on NEXT_TXN_ID.
+          *
+          * In the current design, there can be several metastore instances running in a given Warehouse.
+          * This makes ideas like reserving a range of IDs to save trips to DB impossible.  For example,
+          * a client may go to MS1 and start a transaction with ID 500 to update a particular row.
+          * Now the same client will start another transaction, except it ends up on MS2 and may get
+          * transaction ID 400 and update the same row.  Now the merge that happens to materialize the snapshot
+          * on read will think the version of the row from transaction ID 500 is the latest one.
+          *
+          * Longer term we can consider running Active-Passive MS (at least wrt ACID operations).  This
+          * set could support a write-through cache for added performance.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         // Make sure the user has not requested an insane amount of txns.
+         int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH);
+         if (numTxns > maxTxns) numTxns = maxTxns;
+ 
+         stmt = dbConn.createStatement();
+         List<Long> txnIds = openTxns(dbConn, stmt, rqst);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new OpenTxnsResponse(txnIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "openTxns(" + rqst + ")");
+         throw new MetaException("Unable to select from transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return openTxns(rqst);
+     }
+   }
+ 
+   private List<Long> openTxns(Connection dbConn, Statement stmt, OpenTxnRequest rqst)
+           throws SQLException, MetaException {
+     int numTxns = rqst.getNum_txns();
+     ResultSet rs = null;
+     TxnType txnType = TxnType.DEFAULT;
+     try {
+       if (rqst.isSetReplPolicy()) {
+         List<Long> targetTxnIdList = getTargetTxnIdList(rqst.getReplPolicy(), rqst.getReplSrcTxnIds(), stmt);
+ 
+         if (!targetTxnIdList.isEmpty()) {
+           if (targetTxnIdList.size() != rqst.getReplSrcTxnIds().size()) {
+             LOG.warn("target txn id number " + targetTxnIdList.toString() +
+                     " is not matching with source txn id number " + rqst.getReplSrcTxnIds().toString());
+           }
+           LOG.info("Target transactions " + targetTxnIdList.toString() + " are present for repl policy :" +
+                   rqst.getReplPolicy() + " and Source transaction id : " + rqst.getReplSrcTxnIds().toString());
+           return targetTxnIdList;
+         }
+         txnType = TxnType.REPL_CREATED;
+       }
+ 
+       String s = sqlGenerator.addForUpdateClause("select ntxn_next from NEXT_TXN_ID");
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new MetaException("Transaction database not properly " +
+                 "configured, can't find next transaction id.");
+       }
+       long first = rs.getLong(1);
+       s = "update NEXT_TXN_ID set ntxn_next = " + (first + numTxns);
+       LOG.debug("Going to execute update <" + s + ">");
+       stmt.executeUpdate(s);
+ 
+       long now = getDbTime(dbConn);
+       List<Long> txnIds = new ArrayList<>(numTxns);
+ 
+       List<String> rows = new ArrayList<>();
+       for (long i = first; i < first + numTxns; i++) {
+         txnIds.add(i);
+         rows.add(i + "," + quoteChar(TXN_OPEN) + "," + now + "," + now + ","
+                 + quoteString(rqst.getUser()) + "," + quoteString(rqst.getHostname()) + "," + txnType.getValue());
+       }
+       List<String> queries = sqlGenerator.createInsertValuesStmt(
+             "TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, txn_user, txn_host, txn_type)", rows);
+       for (String q : queries) {
+         LOG.debug("Going to execute update <" + q + ">");
+         stmt.execute(q);
+       }
+ 
+       // Need to register the minimum open txnid for the current transactions in the MIN_HISTORY_LEVEL table.
+       s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_OPEN);
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+ 
+       // TXNS table should have at least one entry because we just inserted the newly opened txns.
+       // So, min(txn_id) would be a non-zero txnid.
+       long minOpenTxnId = rs.getLong(1);
+       assert (minOpenTxnId > 0);
+       rows.clear();
+       for (long txnId = first; txnId < first + numTxns; txnId++) {
+         rows.add(txnId + ", " + minOpenTxnId);
+       }
+ 
+       // Insert transaction entries into MIN_HISTORY_LEVEL.
+       List<String> inserts = sqlGenerator.createInsertValuesStmt(
+               "MIN_HISTORY_LEVEL (mhl_txnid, mhl_min_open_txnid)", rows);
+       for (String insert : inserts) {
+         LOG.debug("Going to execute insert <" + insert + ">");
+         stmt.execute(insert);
+       }
+       LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: (" + txnIds
+               + ") with min_open_txn: " + minOpenTxnId);
+ 
+       if (rqst.isSetReplPolicy()) {
+         List<String> rowsRepl = new ArrayList<>();
+ 
+         for (int i = 0; i < numTxns; i++) {
+           rowsRepl.add(
+                   quoteString(rqst.getReplPolicy()) + "," + rqst.getReplSrcTxnIds().get(i) + "," + txnIds.get(i));
+         }
+ 
+         List<String> queriesRepl = sqlGenerator.createInsertValuesStmt(
+                 "REPL_TXN_MAP (RTM_REPL_POLICY, RTM_SRC_TXN_ID, RTM_TARGET_TXN_ID)", rowsRepl);
+ 
+         for (String query : queriesRepl) {
+           LOG.info("Going to execute insert <" + query + ">");
+           stmt.execute(query);
+         }
+       }
+ 
+       if (transactionalListeners != null) {
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.OPEN_TXN, new OpenTxnEvent(txnIds, null), dbConn, sqlGenerator);
+       }
+       return txnIds;
+     } finally {
+       close(rs);
+     }
+   }
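The id allocation above is a block reservation against NEXT_TXN_ID: the current ntxn_next value becomes the first new txn id, the counter is advanced by the batch size, and the new ids are the half-open range in between. A small arithmetic sketch (the starting value is made up purely for illustration):

    public class TxnIdBlockSketch {
      public static void main(String[] args) {
        long ntxnNext = 101;                    // hypothetical current value of NEXT_TXN_ID.ntxn_next
        int numTxns = 3;                        // batch size requested by the client

        long first = ntxnNext;                  // first newly opened txn id
        long updatedNtxnNext = first + numTxns; // value written back by the UPDATE above

        for (long id = first; id < first + numTxns; id++) {
          System.out.println("opened txn " + id);                             // 101, 102, 103
        }
        System.out.println("NEXT_TXN_ID.ntxn_next is now " + updatedNtxnNext); // 104
      }
    }
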
+ 
+   private List<Long> getTargetTxnIdList(String replPolicy, List<Long> sourceTxnIdList, Statement stmt)
+           throws SQLException {
+     ResultSet rs = null;
+     try {
+       List<String> inQueries = new ArrayList<>();
+       StringBuilder prefix = new StringBuilder();
+       StringBuilder suffix = new StringBuilder();
+       List<Long> targetTxnIdList = new ArrayList<>();
+       prefix.append("select RTM_TARGET_TXN_ID from REPL_TXN_MAP where ");
+       suffix.append(" and RTM_REPL_POLICY = " + quoteString(replPolicy));
+       TxnUtils.buildQueryWithINClause(conf, inQueries, prefix, suffix, sourceTxnIdList,
+               "RTM_SRC_TXN_ID", false, false);
+       for (String query : inQueries) {
+         LOG.debug("Going to execute select <" + query + ">");
+         rs = stmt.executeQuery(query);
+         while (rs.next()) {
+           targetTxnIdList.add(rs.getLong(1));
+         }
+       }
+       LOG.debug("targetTxnid for srcTxnId " + sourceTxnIdList.toString() + " is " + targetTxnIdList.toString());
+       return targetTxnIdList;
+     }  catch (SQLException e) {
+       LOG.warn("failed to get target txn ids " + e.getMessage());
+       throw e;
+     } finally {
+       close(rs);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         List<Long> targetTxnIds = getTargetTxnIdList(replPolicy, Collections.singletonList(sourceTxnId), stmt);
+         if (targetTxnIds.isEmpty()) {
+           LOG.info("Txn {} not present for repl policy {}", sourceTxnId, replPolicy);
+           return -1;
+         }
+         assert (targetTxnIds.size() == 1);
+         return targetTxnIds.get(0);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getTargetTxnId(" + replPolicy + sourceTxnId + ")");
+         throw new MetaException("Unable to get target transaction id "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return getTargetTxnId(replPolicy, sourceTxnId);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+           TxnStatus status = findTxnState(txnid,stmt);
+           if(status == TxnStatus.ABORTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state ABORTED for transactions started using replication replay task");
+               String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                       " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+               LOG.info("Going to execute  <" + s + ">");
+               stmt.executeUpdate(s);
+             }
+             LOG.info("abortTxn(" + JavaUtils.txnIdToString(txnid) +
+               ") requested by it is already " + TxnStatus.ABORTED);
+             return;
+           }
+           raiseTxnUnexpectedState(status, txnid);
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+               " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       abortTxn(rqst);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException {
+     List<Long> txnids = rqst.getTxn_ids();
+     try {
+       Connection dbConn = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         int numAborted = abortTxns(dbConn, txnids, false);
+         if (numAborted != txnids.size()) {
+           LOG.warn("Abort Transactions command only aborted " + numAborted + " out of " +
+               txnids.size() + " transactions. It's possible that the other " +
+               (txnids.size() - numAborted) +
+               " transactions have been aborted or committed, or the transaction ids are invalid.");
+         }
+ 
+         for (Long txnId : txnids) {
+           if (transactionalListeners != null) {
+             MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                     EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnId, null), dbConn, sqlGenerator);
+           }
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxns(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       abortTxns(rqst);
+     }
+   }
+ 
+   /**
+    * Concurrency/isolation notes:
+    * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other {@link #commitTxn(CommitTxnRequest)}
+    * operations using select4update on NEXT_TXN_ID.  Also, mutexes on TXNS table for specific txnid:X,
+    * see more notes below.
+    * In order to prevent lost updates, we need to determine if any 2 transactions overlap.  Each txn
+    * is viewed as an interval [M,N]. M is the txnid and N is taken from the same NEXT_TXN_ID sequence
+    * so that we can compare commit time of txn T with start time of txn S.  This sequence can be thought of
+    * as a logical time counter.  If S.commitTime < T.startTime, T and S do NOT overlap.
+    *
+    * Motivating example:
+    * Suppose we have multi-statement transactions T and S, both of which are attempting x = x + 1.
+    * In order to prevent the lost update problem, the non-overlapping txns must lock in the snapshot
+    * that they read appropriately.  In particular, if txns do not overlap, then one follows the other
+    * (assuming they write the same entity), and thus the 2nd must see the changes of the 1st.  We ensure
+    * this by locking in snapshot after 
+    * {@link #openTxns(OpenTxnRequest)} call is made (see org.apache.hadoop.hive.ql.Driver.acquireLocksAndOpenTxn)
+    * and mutexing openTxn() with commit().  In other words, once a S.commit() starts we must ensure
+    * that txn T which will be considered a later txn, locks in a snapshot that includes the result
+    * of S's commit (assuming no other txns).
+    * As a counter example, suppose we have S[3,3] and T[4,4] (commitId=txnid means no other transactions
+    * were running in parallel).  If T and S both locked in the same snapshot (for example commit of
+    * txnid:2, which is possible if commitTxn() and openTxns() are not mutexed)
+    * 'x' would be updated to the same value by both, i.e. lost update. 
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already committed")
+   public void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException {
+     char isUpdateDelete = 'N';
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet lockHandle = null;
+       ResultSet commitIdRs = null, rs;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         /**
+          * Runs at READ_COMMITTED with S4U on TXNS row for "txnid".  S4U ensures that no other
+          * operation can change this txn (such acquiring locks). While lock() and commitTxn()
+          * should not normally run concurrently (for same txn) but could due to bugs in the client
+          * which could then corrupt internal transaction manager state.  Also competes with abortTxn().
+          */
+         lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
+         if (lockHandle == null) {
+           //if here, txn was not found (in expected state)
+           TxnStatus actualTxnStatus = findTxnState(txnid, stmt);
+           if(actualTxnStatus == TxnStatus.COMMITTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state COMMITTED for transactions started using replication replay task");
+             }
+             /**
+              * This makes the operation idempotent
+              * (assume that this is most likely due to retry logic)
+              */
+             LOG.info("Nth commitTxn(" + JavaUtils.txnIdToString(txnid) + ") msg");
+             return;
+           }
+           raiseTxnUnexpectedState(actualTxnStatus, txnid);
+           shouldNeverHappen(txnid);
+           //dbConn is rolled back in finally{}
+         }
+ 
+         String conflictSQLSuffix = null;
+         if (rqst.isSetReplPolicy()) {
+           rs = null;
+         } else {
+           conflictSQLSuffix = "from TXN_COMPONENTS where tc_txnid=" + txnid + " and tc_operation_type IN(" +
+                   quoteChar(OpertaionType.UPDATE.sqlConst) + "," + quoteChar(OpertaionType.DELETE.sqlConst) + ")";
+           rs = stmt.executeQuery(sqlGenerator.addLimitClause(1,
+                   "tc_operation_type " + conflictSQLSuffix));
+         }
+         if (rs != null && rs.next()) {
+           isUpdateDelete = 'Y';
+           close(rs);
+           //if here it means currently committing txn performed update/delete and we should check WW conflict
+           /**
+            * This S4U will mutex with other commitTxn() and openTxns(). 
+            * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial
+            * Note: it's possible to have several txns have the same commit id.  Suppose 3 txns start
+            * at the same time and no new txns start until all 3 commit.
+            * We could've incremented the sequence for commitId as well, but it doesn't add anything functionally.
+            */
+           commitIdRs = stmt.executeQuery(sqlGenerator.addForUpdateClause("select ntxn_next - 1 from NEXT_TXN_ID"));
+           if (!commitIdRs.next()) {
+             throw new IllegalStateException("No rows found in NEXT_TXN_ID");
+           }
+           long commitId = commitIdRs.getLong(1);
+           Savepoint undoWriteSetForCurrentTxn = dbConn.setSavepoint();
+           /**
+            * "select distinct" is used below because
+            * 1. once we get to multi-statement txns, we only care to record that something was updated once
+            * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by the caller it may create
+            *  duplicate entries in TXN_COMPONENTS
+            * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct
+            * even if it includes all of its columns
+            */
+           int numCompsWritten = stmt.executeUpdate(
+             "insert into WRITE_SET (ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)" +
+             " select distinct tc_database, tc_table, tc_partition, tc_txnid, " + commitId + ", tc_operation_type " + conflictSQLSuffix);
+           /**
+            * see if there are any overlapping txns wrote the same element, i.e. have a conflict
+            * Since entire commit operation is mutexed wrt other start/commit ops,
+            * committed.ws_commit_id <= current.ws_commit_id for all txns
+            * thus if committed.ws_commit_id < current.ws_txnid, transactions do NOT overlap
+            * For example, [17,20] is committed, [6,80] is being committed right now - these overlap
+            * [17,20] committed and [21,21] committing now - these do not overlap.
+            * [17,18] committed and [18,19] committing now - these overlap  (here 18 started while 17 was still running)
+            */
+           rs = stmt.executeQuery
+             (sqlGenerator.addLimitClause(1, "committed.ws_txnid, committed.ws_commit_id, committed.ws_database," +
+               "committed.ws_table, committed.ws_partition, cur.ws_commit_id cur_ws_commit_id, " +
+               "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op " +
+               "from WRITE_SET committed INNER JOIN WRITE_SET cur " +
+               "ON committed.ws_database=cur.ws_database and committed.ws_table=cur.ws_table " +
+               //For partitioned table we always track writes at partition level (never at table)
+               //and for non partitioned - always at table level, thus the same table should never
+               //have entries with partition key and w/o
+               "and (committed.ws_partition=cur.ws_partition or (committed.ws_partition is null and cur.ws_partition is null)) " +
+               "where cur.ws_txnid <= committed.ws_commit_id" + //txns overlap; could replace ws_txnid
+               // with txnid, though any decent DB should infer this
+               " and cur.ws_txnid=" + txnid + //make sure RHS of join only has rows we just inserted as
+               // part of this commitTxn() op
+               " and committed.ws_txnid <> " + txnid + //and LHS only has committed txns
+               //U+U and U+D is a conflict but D+D is not and we don't currently track I in WRITE_SET at all
+               " and (committed.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) +
+               " OR cur.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) + ")"));
+           if (rs.next()) {
+             //found a conflict
+             String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]";
+             StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4));
+             String partitionName = rs.getString(5);
+             if (partitionName != null) {
+               resource.append('/').append(partitionName);
+             }
+             String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + rs.getLong(6) + "]" + " due to a write conflict on " + resource +
+               " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8);
+             close(rs);
+             //remove WRITE_SET info for current txn since it's about to abort
+             dbConn.rollback(undoWriteSetForCurrentTxn);
+             LOG.info(msg);
+             //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
+             if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+               throw new IllegalStateException(msg + " FAILED!");
+             }
+             dbConn.commit();
+             close(null, stmt, dbConn);
+             throw new TxnAbortedException(msg);
+           } else {
+             //no conflicting operations, proceed with the rest of commit sequence
+           }
+         }
+         else {
+           /**
+            * current txn didn't update/delete anything (may have inserted), so just proceed with commit
+            *
+            * We only care about commit id for write txns, so for RO (when supported) txns we don't
+            * have to mutex on NEXT_TXN_ID.
+            * Consider: if RO txn is after a W txn, then RO's openTxns() will be mutexed with W's
+            * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see result of W txn.
+            * If RO < W, then there is no reads-from relationship.
+            * In replication flow we don't expect any write write conflict as it should have been handled at source.
+            */
+         }
+ 
+         String s;
+         if (!rqst.isSetReplPolicy()) {
+           // Move the record from txn_components into completed_txn_components so that the compactor
+           // knows where to look to compact.
+           s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, " +
+                   "ctc_table, ctc_partition, ctc_writeid, ctc_update_delete) select tc_txnid, tc_database, tc_table, " +
+                   "tc_partition, tc_writeid, '" + isUpdateDelete + "' from TXN_COMPONENTS where tc_txnid = " + txnid;
+           LOG.debug("Going to execute insert <" + s + ">");
+ 
+           if ((stmt.executeUpdate(s)) < 1) {
+             //this can be reasonable for an empty txn START/COMMIT or read-only txn
+             //also an IUD with DP that didn't match any rows.
+             LOG.info("Expected to move at least one record from txn_components to " +
+                     "completed_txn_components when committing txn! " + JavaUtils.txnIdToString(txnid));
+           }
+         } else {
+           if (rqst.isSetWriteEventInfos()) {
+             List<String> rows = new ArrayList<>();
+             for (WriteEventInfo writeEventInfo : rqst.getWriteEventInfos()) {
+               rows.add(txnid + "," + quoteString(writeEventInfo.getDatabase()) + "," +
+                       quoteString(writeEventInfo.getTable()) + "," +
+                       quoteString(writeEventInfo.getPartition()) + "," +
+                       writeEventInfo.getWriteId() + "," +
+                       "'" + isUpdateDelete + "'");
+             }
+             List<String> queries = sqlGenerator.createInsertValuesStmt("COMPLETED_TXN_COMPONENTS " +
+                     "(ctc_txnid," + " ctc_database, ctc_table, ctc_partition, ctc_writeid, ctc_update_delete)", rows);
+             for (String q : queries) {
+               LOG.debug("Going to execute insert  <" + q + "> ");
+               stmt.execute(q);
+             }
+           }
+ 
+           s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                   " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Repl going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // cleanup all txn related metadata
+         s = "delete from TXN_COMPONENTS where tc_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from HIVE_LOCKS where hl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from TXNS where txn_id = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from MIN_HISTORY_LEVEL where mhl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         LOG.info("Removed committed transaction: (" + txnid + ") from MIN_HISTORY_LEVEL");
+ 
+         s = "delete from MATERIALIZATION_REBUILD_LOCKS where mrl_txn_id = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         close(rs);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "commitTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(commitIdRs);
+         close(lockHandle, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       commitTxn(rqst);
+     }
+   }
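The conflict query above encodes the overlap rule from the comments: a committed writer with commit id C conflicts with the txn T committing now only if T <= C (i.e. T started before the other txn finished), and only when at least one of the two performed an UPDATE. A tiny sketch that replays the worked examples from the comment, with the predicate pulled out into plain Java (no DB access; the method name is illustrative):

    public class WriteSetOverlapSketch {
      // Same predicate as "cur.ws_txnid <= committed.ws_commit_id" in the query above.
      static boolean overlaps(long committedCommitId, long committingTxnId) {
        return committingTxnId <= committedCommitId;
      }

      public static void main(String[] args) {
        System.out.println(overlaps(20, 6));   // committed [17,20], committing [6,80]  -> true  (overlap)
        System.out.println(overlaps(20, 21));  // committed [17,20], committing [21,21] -> false (no overlap)
        System.out.println(overlaps(18, 18));  // committed [17,18], committing [18,19] -> true  (overlap)
      }
    }
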
+ 
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already replicated the writeid state")
+   public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException {
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist());
+ 
+     // Get the abortedWriteIds which are already sorted in ascending order.
+     List<Long> abortedWriteIds = getAbortedWriteIds(validWriteIdList);
+     int numAbortedWrites = abortedWriteIds.size();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Check if this txn state is already replicated for this given table. If yes, then it is
+         // idempotent case and just return.
+         String sql = "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName);
+         LOG.debug("Going to execute query <" + sql + ">");
+ 
+         rs = stmt.executeQuery(sql);
+         if (rs.next()) {
+           LOG.info("Idempotent flow: WriteId state <" + validWriteIdList + "> is already applied for the table: "
+                   + dbName + "." + tblName);
+           rollbackDBConn(dbConn);
+           return;
+         }
+ 
+         if (numAbortedWrites > 0) {
+           // Allocate/Map one txn per aborted writeId and abort the txn to mark writeid as aborted.
+           List<Long> txnIds = openTxns(dbConn, stmt,
+                   new OpenTxnRequest(numAbortedWrites, rqst.getUser(), rqst.getHostName()));
+           assert(numAbortedWrites == txnIds.size());
+ 
+           // Map each aborted write id with each allocated txn.
+           List<String> rows = new ArrayList<>();
+           int i = 0;
+           for (long txn : txnIds) {
+             long writeId = abortedWriteIds.get(i++);
+             rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+             LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           }
+ 
+           // Insert entries to TXN_TO_WRITE_ID for aborted write ids
+           List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                   "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+           for (String insert : inserts) {
+             LOG.debug("Going to execute insert <" + insert + ">");
+             stmt.execute(insert);
+           }
+ 
+           // Abort all the allocated txns so that the mapped write ids are referred as aborted ones.
+           int numAborts = abortTxns(dbConn, txnIds, true);
+           assert(numAborts == numAbortedWrites);
+         }
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There are some txns in the list which have no write id allocated, so go ahead and do it.
+         // Get the next write id for the given table and update it with the new next write id.
+         // NEXT_WRITE_ID is expected to have no entry for this table, hence the direct insert.
+         long nextWriteId = validWriteIdList.getHighWatermark() + 1;
+ 
+         // First allocation of write id (hwm+1) should add the table to the next_write_id meta table.
+         sql = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                 + quoteString(dbName) + "," + quoteString(tblName) + ","
+                 + Long.toString(nextWriteId) + ")";
+         LOG.debug("Going to execute insert <" + sql + ">");
+         stmt.execute(sql);
+ 
+         LOG.info("WriteId state <" + validWriteIdList + "> is applied for the table: " + dbName + "." + tblName);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "replTableWriteIdState(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       replTableWriteIdState(rqst);
+     }
+ 
+     // Schedule Major compaction on all the partitions/table to clean aborted data
+     if (numAbortedWrites > 0) {
+       CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(),
+               CompactionType.MAJOR);
+       if (rqst.isSetPartNames()) {
+         for (String partName : rqst.getPartNames()) {
+           compactRqst.setPartitionname(partName);
+           compact(compactRqst);
+         }
+       } else {
+         compact(compactRqst);
+       }
+     }
+   }
+ 
+   private List<Long> getAbortedWriteIds(ValidWriteIdList validWriteIdList) {
+     List<Long> abortedWriteIds = new ArrayList<>();
+     for (long writeId : validWriteIdList.getInvalidWriteIds()) {
+       if (validWriteIdList.isWriteIdAborted(writeId)) {
+         abortedWriteIds.add(writeId);
+       }
+     }
+     return abortedWriteIds;
+   }
+ 
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException, MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ValidTxnList validTxnList;
+ 
+       // We should prepare the valid write ids list based on the validTxnList of the current txn.
+       // If no txn exists in the caller, they pass null for validTxnList, so it is
+       // required to get the current state of txns to build the validTxnList.
+       if (rqst.isSetValidTxnList()) {
+         validTxnList = new ValidReadTxnList(rqst.getValidTxnList());
+       } else {
+         // Passing 0 for currentTxn means this validTxnList is not w.r.t. any particular txn
+         validTxnList = TxnUtils.createValidReadTxnList(getOpenTxns(), 0);
+       }
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Get the valid write id list for all the tables read by the current txn
+         List<TableValidWriteIds> tblValidWriteIdsList = new ArrayList<>();
+         for (String fullTableName : rqst.getFullTableNames()) {
+           tblValidWriteIdsList.add(getValidWriteIdsForTable(stmt, fullTableName, validTxnList));
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         GetValidWriteIdsResponse owr = new GetValidWriteIdsResponse(tblValidWriteIdsList);
+         return owr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getValidWriteIds");
+         throw new MetaException("Unable to select from transaction database, "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getValidWriteIds(rqst);
+     }
+   }
+ 
+   // Method to get the Valid write ids list for the given table
+   // Input fullTableName is expected to be of format <db_name>.<table_name>
+   private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullTableName,
+                                                ValidTxnList validTxnList) throws SQLException {
+     ResultSet rs = null;
+     String[] names = TxnUtils.getDbTableName(fullTableName);
+     try {
+       // Need to initialize to 0 to make sure that if nobody modified this table, then the current txn
+       // doesn't read any data.
+       // If there is a conversion from a non-acid to an acid table, then by default 0 is assigned as the
+       // writeId for data from the non-acid table, so writeIdHwm=0 ensures that data is readable by any txn.
+       long writeIdHwm = 0;
+       List<Long> invalidWriteIdList = new ArrayList<>();
+       long minOpenWriteId = Long.MAX_VALUE;
+       BitSet abortedBits = new BitSet();
+       long txnHwm = validTxnList.getHighWatermark();
+ 
+       // Find the writeId high water mark based upon the txnId high water mark. If found, traverse
+       // all write ids less than the writeId HWM to build the exceptions list.
+       // writeIdHwm = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm))
+       String s = "select max(t2w_writeid) from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1]);
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (rs.next()) {
+         writeIdHwm = rs.getLong(1);
+       }
+ 
+       // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID.
+       if (writeIdHwm <= 0) {
+         // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest
+         // allocated write id.
+         s = "select nwi_next-1 from NEXT_WRITE_ID where nwi_database = " + quoteString(names[0])
+                 + " and nwi_table = " + quoteString(names[1]);
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long maxWriteId = rs.getLong(1);
+           if (maxWriteId > 0) {
+             writeIdHwm = (writeIdHwm > 0) ? Math.min(maxWriteId, writeIdHwm) : maxWriteId;
+           }
+         }
+       }
+ 
+       // As writeIdHwm is known, query all writeIds under the writeId HWM.
+       // If any writeId under the HWM is allocated by a txn > txnId HWM or belongs to open/aborted txns,
+       // then it is added to the invalid list. The results should be sorted in ascending order of
+       // write id. The sorting is needed as the exceptions list in ValidWriteIdList is looked up
+       // using binary search.
+       s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_writeid <= " + writeIdHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1])
+               + " order by t2w_writeid asc";
+ 
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       while (rs.next()) {
+         long txnId = rs.getLong(1);
+         long writeId = rs.getLong(2);
+         if (validTxnList.isTxnValid(txnId)) {
+           // Skip if the transaction under evaluation is already committed.
+           continue;
+         }
+ 
+         // The current txn is either in open or aborted state.
+         // Mark the write ids state as per the txn state.
+         invalidWriteIdList.add(writeId);
+         if (validTxnList.isTxnAborted(txnId)) {
+           abortedBits.set(invalidWriteIdList.size() - 1);
+         } else {
+           minOpenWriteId = Math.min(minOpenWriteId, writeId);
+         }
+       }
+ 
+       ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+       TableValidWriteIds owi = new TableValidWriteIds(fullTableName, writeIdHwm, invalidWriteIdList, byteBuffer);
+       if (minOpenWriteId < Long.MAX_VALUE) {
+         owi.setMinOpenWriteId(minOpenWriteId);
+       }
+       return owi;
+     } finally {
+       close(rs);
+     }
+   }
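
The exceptions list built above is kept sorted so that ValidWriteIdList can binary-search it, and the aborted flags travel positionally in a BitSet serialized into a ByteBuffer. Below is a minimal JDK-only sketch of that encoding and of a lookup against it; the field names mirror the local variables above, but the class itself is illustrative.

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.BitSet;
    import java.util.Collections;
    import java.util.List;

    // Illustrative sketch of the encoding used above: the i-th bit of abortedBits says
    // whether the i-th entry of the sorted invalid-write-id list belongs to an aborted
    // (rather than still open) transaction.
    final class WriteIdSnapshotSketch {
      private final List<Long> invalidWriteIds = new ArrayList<>(); // kept sorted ascending
      private final BitSet abortedBits = new BitSet();

      void addInvalid(long writeId, boolean aborted) {
        invalidWriteIds.add(writeId); // caller must add ids in ascending order
        if (aborted) {
          abortedBits.set(invalidWriteIds.size() - 1);
        }
      }

      boolean isAborted(long writeId) {
        int idx = Collections.binarySearch(invalidWriteIds, writeId);
        return idx >= 0 && abortedBits.get(idx);
      }

      ByteBuffer serializedAbortedBits() {
        return ByteBuffer.wrap(abortedBits.toByteArray());
      }
    }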
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+           throws NoSuchTxnException, TxnAbortedException, MetaException {
+     List<Long> txnIds;
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       List<TxnToWriteId> txnToWriteIds = new ArrayList<>();
+       List<TxnToWriteId> srcTxnToWriteIds = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           srcTxnToWriteIds = rqst.getSrcTxnToWriteIdList();
+           List<Long> srcTxnIds = new ArrayList<>();
+           assert (rqst.isSetSrcTxnToWriteIdList());
+           assert (!rqst.isSetTxnIds());
+           assert (!srcTxnToWriteIds.isEmpty());
+ 
+           for (TxnToWriteId txnToWriteId :  srcTxnToWriteIds) {
+             srcTxnIds.add(txnToWriteId.getTxnId());
+           }
+           txnIds = getTargetTxnIdList(rqst.getReplPolicy(), srcTxnIds, stmt);
+           if (srcTxnIds.size() != txnIds.size()) {
+             LOG.warn("Target txn id is missing for source txn id : " + srcTxnIds.toString() +
+                     " and repl policy " + rqst.getReplPolicy());
+             throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+           }
+         } else {
+           assert (!rqst.isSetSrcTxnToWriteIdList());
+           assert (rqst.isSetTxnIds());
+           txnIds = rqst.getTxnIds();
+         }
+ 
+         Collections.sort(txnIds); // easier to read logs and matches the ordering assumption made in the replication flow
+ 
+         // Check if all the input txns are in open state. Write ID should be allocated only for open transactions.
+         if (!isTxnsInOpenState(txnIds, stmt)) {
+           ensureAllTxnsValid(dbName, tblName, txnIds, stmt);
+           throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+         }
+ 
+         long writeId;
+         String s;
+         long allocatedTxnsCount = 0;
+         long txnId;
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         // Traverse TXN_TO_WRITE_ID to see if any of the input txns have already allocated a
+         // write id for the same db.table. If yes, reuse it; otherwise a new one must be allocated.
+         // The write id would already have been allocated in case of multi-statement txns, where
+         // the first write on a table allocates a write id and the rest of the writes re-use it.
+         prefix.append("select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where"
+                         + " t2w_database = " + quoteString(dbName)
+                         + " and t2w_table = " + quoteString(tblName) + " and ");
+         suffix.append("");
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+                 txnIds, "t2w_txnid", false, false);
+         for (String query : queries) {
+           LOG.debug("Going to execute query <" + query + ">");
+           rs = stmt.executeQuery(query);
+           while (rs.next()) {
+             // If table write ID is already allocated for the given transaction, then just use it
+             txnId = rs.getLong(1);
+             writeId = rs.getLong(2);
+             txnToWriteIds.add(new TxnToWriteId(txnId, writeId));
+             allocatedTxnsCount++;
+             LOG.info("Reused already allocated writeID: " + writeId + " for txnId: " + txnId);
+           }
+         }
+ 
+         // Batch allocation should always happen atomically: either write ids are allocated for all txns or for none.
+         long numOfWriteIds = txnIds.size();
+         assert ((allocatedTxnsCount == 0) || (numOfWriteIds == allocatedTxnsCount));
+         if (allocatedTxnsCount == numOfWriteIds) {
+           // If all the txns in the list have pre-allocated write ids for the given table, then just return.
+           // This is the idempotent case.
+           return new AllocateTableWriteIdsResponse(txnToWriteIds);
+         }
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There are some txns in the list which do not have a write id allocated, so go ahead and do it.
+         // Get the next write id for the given table and update it with the new next write id.
+         // This is a select-for-update query which takes a lock if the table entry already exists in NEXT_WRITE_ID.
+         s = sqlGenerator.addForUpdateClause(
+                 "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName));
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           // The first allocation of a write id should add the table to the next_write_id meta table.
+           // The initial value for a write id is 1, so we add 1 to the number of write ids allocated here.
+           writeId = 1;
+           s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                   + quoteString(dbName) + "," + quoteString(tblName) + "," + Long.toString(numOfWriteIds + 1) + ")";
+           LOG.debug("Going to execute insert <" + s + ">");
+           stmt.execute(s);
+         } else {
+           writeId = rs.getLong(1);
+           // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated
+           s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds)
+                   + " where nwi_database = " + quoteString(dbName)
+                   + " and nwi_table = " + quoteString(tblName);
+           LOG.debug("Going to execute update <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // Map the newly allocated write ids against the list of txns which don't have pre-allocated
+         // write ids.
+         List<String> rows = new ArrayList<>();
+         for (long txn : txnIds) {
+           rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+           txnToWriteIds.add(new TxnToWriteId(txn, writeId));
+           LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           writeId++;
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           int lastIdx = txnToWriteIds.size()-1;
+           if ((txnToWriteIds.get(0).getWriteId() != srcTxnToWriteIds.get(0).getWriteId()) ||
+               (txnToWriteIds.get(lastIdx).getWriteId() != srcTxnToWriteIds.get(lastIdx).getWriteId())) {
+             LOG.error("Allocated write id range {} is not matching with the input write id range {}.",
+                     txnToWriteIds, srcTxnToWriteIds);
+             throw new IllegalStateException("Write id allocation failed for: " + srcTxnToWriteIds);
+           }
+         }
+ 
+         // Insert entries to TXN_TO_WRITE_ID for newly allocated write ids
+         List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                 "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+         for (String insert : inserts) {
+           LOG.debug("Going to execute insert <" + insert + ">");
+           stmt.execute(insert);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ALLOC_WRITE_ID,
+                   new AllocWriteIdEvent(txnToWriteIds, rqst.getDbName(), rqst.getTableName(), null),
+                   dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new AllocateTableWriteIdsResponse(txnToWriteIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "allocateTableWriteIds(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return allocateTableWriteIds(rqst);
+     }
+   }
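
Write ids are handed out here as a contiguous block per (database, table): NEXT_WRITE_ID.nwi_next supplies the first id of the block and is then advanced by the batch size, all under the WriteIdAllocator mutex. Below is a minimal in-memory sketch of that counter logic; a HashMap stands in for the NEXT_WRITE_ID table, and the class and names are illustrative only. On a fresh table, allocate("db1.t1", 3) returns [1, 2, 3] and the next call returns [4, 5, 6].

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative sketch: per-table monotonic write id allocation. The real code keeps
    // this counter in the NEXT_WRITE_ID table and serializes access with a mutex; here a
    // HashMap and synchronization stand in for both.
    final class WriteIdAllocatorSketch {
      private final Map<String, Long> nextWriteId = new HashMap<>(); // "db.table" -> next id to hand out

      synchronized List<Long> allocate(String dbTable, int count) {
        long first = nextWriteId.getOrDefault(dbTable, 1L); // first ever allocation starts at 1
        nextWriteId.put(dbTable, first + count);            // advance by the batch size
        List<Long> ids = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
          ids.add(first + i);
        }
        return ids;
      }
    }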
+   @Override
+   public void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst)
+       throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+         // Since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry
+         // for this table.  It also has a unique index in case 'should not' is violated.
+ 
+         // The first allocation of a write id should add the table to the next_write_id meta table.
+         // The next write id must start just above the seeded write id, hence we insert
+         // seedWriteId + 1 here.
+         String s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+             + quoteString(rqst.getDbName()) + "," + quoteString(rqst.getTblName()) + "," +
+             Long.toString(rqst.getSeeWriteId() + 1) + ")";
+         LOG.debug("Going to execute insert <" + s + ">");
+         stmt.execute(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "seedWriteIdOnAcidConversion(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       seedWriteIdOnAcidConversion(rqst);
+     }
+ 
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void addWriteNotificationLog(AcidWriteEvent acidWriteEvent)
+           throws MetaException {
+     Connection dbConn = null;
+     try {
+       try {
+         // The idempotent case is handled by the notification event.
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.ACID_WRITE, acidWriteEvent, dbConn, sqlGenerator);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         if (isDuplicateKeyError(e)) {
+           // In case of a duplicate key error, retry as it might be due to a race condition.
+           if (waitForRetry("addWriteNotificationLog(" + acidWriteEvent + ")", e.getMessage())) {
+             throw new RetryException();
+           }
+           retryNum = 0;
+           throw new MetaException(e.getMessage());
+         }
+         checkRetryable(dbConn, e, "addWriteNotificationLog(" + acidWriteEvent + ")");
+         throw new MetaException("Unable to add write notification event " + StringUtils.stringifyException(e));
+       } finally{
+         closeDbConn(dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       addWriteNotificationLog(acidWriteEvent);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void performWriteSetGC() {
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       rs = stmt.executeQuery("select ntxn_next - 1 from NEXT_TXN_ID");
+       if(!rs.next()) {
+         throw new IllegalStateException("NEXT_TXN_ID is empty: DB is corrupted");
+       }
+       long highestAllocatedTxnId = rs.getLong(1);
+       close(rs);
+       rs = stmt.executeQuery("select min(txn_id) from TXNS where txn_state=" + quoteChar(TXN_OPEN));
+       if(!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+       long commitHighWaterMark; // all currently open txns (if any) have txnid >= commitHighWaterMark
+       long lowestOpenTxnId = rs.getLong(1);
+       if(rs.wasNull()) {
+         // If here, then there are no open txns and highestAllocatedTxnId must be
+         // resolved (i.e. committed or aborted); either way,
+         // there are no open txns with id <= highestAllocatedTxnId.
+         // The +1 is there because the "delete ..." below uses < (which is correct for the case
+         // when there is an open txn).
+         // Concurrency: even if a new txn starts (or starts + commits), it is still true that
+         // there are no currently open txns that overlap with any committed txn with
+         // commitId <= commitHighWaterMark (as set on the next line). So plain READ_COMMITTED is enough.
+         commitHighWaterMark = highestAllocatedTxnId + 1;
+       }
+       else {
+         commitHighWaterMark = lowestOpenTxnId;
+       }
+       int delCnt = stmt.executeUpdate("delete from WRITE_SET where ws_commit_id < " + commitHighWaterMark);
+       LOG.info("Deleted " + delCnt + " obsolete rows from WRTIE_SET");
+       dbConn.commit();
+     } catch (SQLException ex) {
+       LOG.warn("WriteSet GC failed due to " + getMessage(ex), ex);
+     }
+     finally {
+       close(rs, stmt, dbConn);
+     }
+   }
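
The GC above only drops WRITE_SET rows that can no longer conflict with any open transaction: when some transaction is open, the cutoff is the lowest open txn id; when none is, it is the highest allocated txn id plus one (the delete uses a strict '<'). A tiny JDK-only sketch of that cutoff rule, with illustrative names:

    import java.util.OptionalLong;

    // Illustrative sketch of the commitHighWaterMark rule used by the WRITE_SET GC:
    // rows with ws_commit_id below this value cannot conflict with any open txn.
    final class WriteSetGcSketch {
      static long commitHighWaterMark(long highestAllocatedTxnId, OptionalLong lowestOpenTxnId) {
        // No open txns: everything up to highestAllocatedTxnId is resolved, so the cutoff is
        // one past it because the delete uses a strict '<'.
        return lowestOpenTxnId.orElse(highestAllocatedTxnId + 1);
      }
    }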
+ 
+   /**
+    * Get invalidation info for the materialization. Currently, the materialization information
+    * only contains information about whether there were update/delete operations on the source
+    * tables used by the materialization since it was created.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public Materialization getMaterializationInvalidationInfo(
+       CreationMetadata creationMetadata, String validTxnListStr) throws MetaException {
+     if (creationMetadata.getTablesUsed().isEmpty()) {
+       // Bail out
+       LOG.warn("Materialization creation metadata does not contain any table");
+       return null;
+     }
+ 
+     // Parse validTxnList
+     final ValidReadTxnList validTxnList =
+         new ValidReadTxnList(validTxnListStr);
+ 
+     // Parse validReaderWriteIdList from creation metadata
+     final ValidTxnWriteIdList validReaderWriteIdList =
+         new ValidTxnWriteIdList(creationMetadata.getValidTxnList());
+ 
+     // We are composing a query that returns a single row if an update happened after
+     // the materialization was created. Otherwise, query returns 0 rows.
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       stmt.setMaxRows(1);
+       StringBuilder query = new StringBuilder();
+       // compose a query that select transactions containing an update...
+       query.append("select ctc_update_delete from COMPLETED_TXN_COMPONENTS where ctc_update_delete='Y' AND (");
+       int i = 0;
+       for (String fullyQualifiedName : creationMetadata.getTablesUsed()) {
+         // ...for each of the tables that are part of the materialized view,
+         // where the transaction had to be committed after the materialization was created...
+         if (i != 0) {
+           query.append("OR");
+         }
+         String[] names = TxnUtils.getDbTableName(fullyQualifiedName);
+         query.append(" (ctc_database=" + quoteString(names[0]) + " AND ctc_table=" + quoteString(names[1]));
+         ValidWriteIdList tblValidWriteIdList =
+             validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
+         if (tblValidWriteIdList == null) {
+           LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen");
+           return null;
+         }
+         query.append(" AND (ctc_writeid > " + tblValidWriteIdList.getHighWatermark());
+         query.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " :
+             " OR ctc_writeid IN(" + StringUtils.join(",",
+                 Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ");
+         query.append(") ");
+         i++;
+       }
+       // ... and where the transaction has already been committed as per the snapshot taken
+       // when running the current query
+       query.append(") AND ctc_txnid <= " + validTxnList.getHighWatermark());
+       query.append(validTxnList.getInvalidTransactions().length == 0 ? " " :
+           " AND ctc_txnid NOT IN(" + StringUtils.join(",",
+               Arrays.asList(ArrayUtils.toObject(validTxnList.getInvalidTransactions()))) + ") ");
+ 
+       // Execute query
+       String s = query.toString();
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("Going to execute query <" + s + ">");
+       }
+       rs = stmt.executeQuery(s);
+ 
+       return new Materialization(rs.next());
+     } catch (SQLException ex) {
+       LOG.warn("getMaterializationInvalidationInfo failed due to " + getMessage(ex), ex);
+       throw new MetaException("Unable to retrieve materialization invalidation information due to " +
+           StringUtils.stringifyException(ex));
+     } finally {
+       close(rs, stmt, dbConn);
+     }
+   }
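
To make the composed query concrete: for each source table, the predicate pairs the table name with "write id above the snapshot high water mark, or in its exceptions list". The sketch below builds that per-table predicate for hypothetical snapshot values; the column names come from the statements above, while the helper and values are illustrative.

    import java.util.Arrays;
    import java.util.stream.Collectors;

    // Illustrative sketch: build the per-table predicate of the invalidation query for
    // hypothetical snapshot values (writeIdHwm and the invalid write ids of one table).
    final class InvalidationPredicateSketch {
      static String tablePredicate(String db, String tbl, long writeIdHwm, long[] invalidWriteIds) {
        StringBuilder sb = new StringBuilder();
        sb.append("(ctc_database='").append(db).append("' AND ctc_table='").append(tbl).append("'");
        sb.append(" AND (ctc_writeid > ").append(writeIdHwm);
        if (invalidWriteIds.length > 0) {
          String inList = Arrays.stream(invalidWriteIds)
              .mapToObj(l -> Long.toString(l))
              .collect(Collectors.joining(","));
          sb.append(" OR ctc_writeid IN(").append(inList).append(")");
        }
        sb.append("))");
        return sb.toString();
      }

      public static void main(String[] args) {
        // Prints: (ctc_database='db1' AND ctc_table='t1' AND (ctc_writeid > 7 OR ctc_writeid IN(3,5)))
        System.out.println(tablePredicate("db1", "t1", 7, new long[] {3, 5}));
      }
    }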
+ 
+   @Override
+   public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException {
+ 
+     if (LOG.isDebugEnabled()) {
+       LOG.debug("Acquiring lock for materialization rebuild with txnId={} for {}", txnId, Warehouse.getQualifiedName(dbName,tableName));
+     }
+ 
+     TxnStore.MutexAPI.LockHandle handle = null;
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       lockInternal();
+       /**
+        * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in
+        * Initiated/Working state for any resource. This ensures we do not run concurrent
+        * rebuild operations on any materialization.
+        */
+       handle = getMutexAPI().acquireLock(MUTEX_KEY.MaterializationRebuild.name());
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+ 
+       String selectQ = "select mrl_txn_id from MATERIALIZATION_REBUILD_LOCKS where" +
+           " mrl_db_name =" + quoteString(dbName) +
+           " AND mrl_tbl_name=" + quoteString(tableName);
+       LOG.debug("Going to execute query <" + selectQ + ">");
+       rs = stmt.executeQuery(selectQ);
+       if(rs.next()) {
+         LOG.info("Ignoring request to rebuild " + dbName + "/" + tableName +
+             " since it is already being rebuilt");
+         return new LockResponse(txnId, LockState.NOT_ACQUIRED);
+       }
+       String insertQ = "insert into MATERIALIZATION_REBUILD_LOCKS " +
+           "(mrl_txn_id, mrl_db_name, mrl_tbl_name, mrl_last_heartbeat) values (" + txnId +
+           ", '" + dbName + "', '" + tableName + "', " + Instant.now().toEpochMilli() + ")";
+       LOG.debug("Going to execute update <" + insertQ + ">");
+       stmt.executeUpdate(insertQ);
+       LOG.debug("Going to commit");
+       dbConn.commit();
+       return new LockResponse(txnId, LockState.ACQUIRED);
+     } catch (SQLException ex) {
+       LOG.warn("lockMaterializationRebuild failed due to " + getMessage(ex), ex);
+       throw new MetaException("Unable to retrieve materialization invalidation information due to " +
+           StringUtils.stringifyException(ex));
+     } finally {
+       close(rs, stmt, dbConn);
+       if(handle != null) {
+         handle.releaseLocks();
+       }
+       unlockInternal();
+     }
+   }
+ 
+   @Override
+   public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId)
+       throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update MATERIALIZATION_REBUILD_LOCKS" +
+             " set mrl_last_heartbeat = " + Instant.now().toEpochMilli() +
+             " where mrl_txn_id = " + txnId +
+             " AND mrl_db_name =" + quoteString(dbName) +
+             " AND mrl_tbl_name=" + quoteString(tableName);
+         LOG.debug("Going to execute update <" + s + ">");
+         int rc = stmt.executeUpdate(s);
+         if (rc < 1) {
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+           LOG.info("No lock found for rebuild of " + Warehouse.getQualifiedName(dbName, tableName) +
+               " when trying to heartbeat");
+           // It could not be renewed, return that information
+           return false;
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         // It could be renewed, return that information
+         return true;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e,
+             "heartbeatLockMaterializationRebuild(" + Warehouse.getQualifiedName(dbName, tableName) + ", " + txnId + ")");
+         throw new MetaException("Unable to heartbeat rebuild lock due to " +
+             StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return heartbeatLockMaterializationRebuild(dbName, tableName ,txnId);
+     }
+   }
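
The rebuild lock row is kept alive by the heartbeat above and reaped by cleanupMaterializationRebuildLocks (shown next) once the heartbeat is older than the timeout and the owning transaction is no longer open. Below is a small sketch of that reaping decision; the ValidTxnList calls mirror the ones used in the cleanup method, while the helper itself is illustrative.

    import org.apache.hadoop.hive.common.ValidTxnList;

    // Illustrative sketch of the reaping rule: a MATERIALIZATION_REBUILD_LOCKS row may be
    // deleted once its heartbeat is stale and its owning txn has been resolved
    // (committed, possibly with the notification lost, or aborted).
    final class RebuildLockReaperSketch {
      static boolean canReap(long lastHeartbeatMillis, long nowMillis, long timeoutMillis,
                             long txnId, ValidTxnList validTxnList) {
        boolean heartbeatExpired = lastHeartbeatMillis < nowMillis - timeoutMillis;
        boolean txnResolved = validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId);
        return heartbeatExpired && txnResolved;
      }
    }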
+ 
+   @Override
+   public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException {
+     try {
+       // Aux values
+       long cnt = 0L;
+       List<Long> txnIds = new ArrayList<>();
+       long timeoutTime = Instant.now().toEpochMilli() - timeout;
+ 
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         String selectQ = "select mrl_txn_id, mrl_last_heartbeat from MATERIALIZATION_REBUILD_LOCKS";
+         LOG.debug("Going to execute query <" + selectQ + ">");
+         rs = stmt.executeQuery(selectQ);
+         while(rs.next()) {
+           long lastHeartbeat = rs.getLong(2);
+           if (lastHeartbeat < timeoutTime) {
+             // The heartbeat has timed out; double check whether we can remove it
+             long txnId = rs.getLong(1);
+             if (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)) {
+               // Txn was committed (but the notification was not received) or it was aborted.
+               // In either case, we can clean it up.
+               txnIds.add(txnId);
+             }
+           }
+         }
+         if (!txnIds.isEmpty()) {
+           String deleteQ = "delete from MATERIALIZATION_REBUILD_LOCKS where" +
+               " mrl_txn_id IN(" + StringUtils.join(",", txnIds) + ") ";
+           LOG.debug("Going to execute update <" + deleteQ + ">");
+           cnt = stmt.executeUpdate(deleteQ);
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return cnt;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanupMaterializationRebuildLocks");
+    

<TRUNCATED>

[26/50] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0719

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 0000000,92e2805..70edb96
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@@ -1,0 -1,3422 +1,3597 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this is marked as unstable.
+  * For users who require a retry mechanism when the connection between the metastore and client is
+  * broken, the RetryingMetaStoreClient class should be used.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * implying the usage of some expanded features that require client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when the
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Test capability for tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI metastoreUris[];
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a copy of the conf so that if the session conf changes, we may need to get a new HMS client.
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connects
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   //copied from ErrorMsg.java
+   private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store.";
 -  
++
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
+ 
+   public HiveMetaStoreClient(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // user wants file store based configuration
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     //If HADOOP_PROXY_USER is set in env or property,
+     //then need to create metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
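
For completeness, a minimal usage sketch of this constructor against a remote metastore. The URI is a placeholder; the calls used (MetastoreConf.newMetastoreConf, MetastoreConf.setVar, ConfVars.THRIFT_URIS, close) are the ones already appearing in this class, and as the class javadoc notes, callers needing reconnect-on-failure should wrap the client with RetryingMetaStoreClient.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;

    // Minimal usage sketch: point the client at a (placeholder) remote metastore URI and
    // open a connection; metadata calls would go where the comment is.
    public class ClientUsageSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://metastore-host:9083"); // placeholder host
        IMetaStoreClient client = new HiveMetaStoreClient(conf);
        try {
          // ... e.g. client.getAllDatabases(), client.getTable(...), etc.
        } finally {
          client.close();
        }
      }
    }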
+ 
+   private void resolveUris() throws MetaException {
+     String metastoreUrisString[] =  MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       int i = 0;
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = new URI[metastoreURIArray.size()];
+       for (int j = 0; j < metastoreURIArray.size(); j++) {
+         metastoreUris[j] = metastoreURIArray.get(j);
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         List uriList = Arrays.asList(metastoreUris);
+         Collections.shuffle(uriList);
+         metastoreUris = (URI[]) uriList.toArray();
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
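
THRIFT_URIS is a comma-separated list; every entry must carry a scheme, and with the RANDOM selection strategy the resolved list is shuffled so clients spread their initial connections across metastore instances. A JDK-only sketch of that parse-and-shuffle step follows (the scheme check mirrors the one above; the class itself is illustrative):

    import java.net.URI;
    import java.net.URISyntaxException;
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    // Illustrative sketch: parse a comma-separated metastore URI list, reject entries
    // without a scheme, and shuffle the result (the RANDOM selection strategy).
    final class UriListSketch {
      static List<URI> parseAndShuffle(String thriftUris) throws URISyntaxException {
        List<URI> uris = new ArrayList<>();
        for (String s : thriftUris.split(",")) {
          URI uri = new URI(s.trim());
          if (uri.getScheme() == null) {
            throw new IllegalArgumentException("URI: " + s + " does not have a scheme");
          }
          uris.add(uri);
        }
        Collections.shuffle(uris);
        return uris;
      }

      public static void main(String[] args) throws URISyntaxException {
        System.out.println(parseAndShuffle("thrift://ms1:9083,thrift://ms2:9083"));
      }
    }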
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   //multiple clients may initialize the hook at the same time
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver" + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook" + e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a copy of currentMetaVars; there is a race condition in that
+     // currentMetaVars might be changed during the execution of the method.
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Mestastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
+ 
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+                           boolean cascade) throws TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     HiveMetaHook hook = getHook(new_tbl);
+     if (hook != null) {
+       hook.preAlterTable(new_tbl, envContext);
+     }
 -    client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf),
 -        tbl_name, new_tbl, envContext);
++    AlterTableRequest req = new AlterTableRequest(dbname, tbl_name, new_tbl);
++    req.setCatName(MetaStoreUtils.getDefaultCatalog(conf));
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
+   }
+ 
+   @Override
+   public void alter_table(String catName, String dbName, String tblName, Table newTable,
+                          EnvironmentContext envContext) throws TException {
 -    client.alter_table_with_environment_context(prependCatalogToDbName(catName,
 -        dbName, conf), tblName, newTable, envContext);
++    // This never used to call the hook. Why? There's overload madness in metastore...
++    AlterTableRequest req = new AlterTableRequest(dbName, tblName, newTable);
++    req.setCatName(catName);
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
++  }
++
++  @Override
++  public void alter_table(String catName, String dbName, String tbl_name, Table new_tbl,
++      EnvironmentContext envContext, long txnId, String validWriteIds)
++          throws InvalidOperationException, MetaException, TException {
++    HiveMetaHook hook = getHook(new_tbl);
++    if (hook != null) {
++      hook.preAlterTable(new_tbl, envContext);
++    }
++    AlterTableRequest req = new AlterTableRequest(dbName, tbl_name, new_tbl);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    req.setEnvironmentContext(envContext);
++    client.alter_table_req(req);
+   }
+ 
++  @Deprecated
+   @Override
+   public void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
+                               final Partition newPart) throws TException {
 -    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart);
++    renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart, -1, null);
+   }
+ 
+   @Override
+   public void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
 -                              Partition newPart) throws TException {
 -    client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart);
 -
++                              Partition newPart, long txnId, String validWriteIds) throws TException {
++    RenamePartitionRequest req = new RenamePartitionRequest(dbname, tableName, part_vals, newPart);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    client.rename_partition_req(req);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " Not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword );
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in insecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() is not successful, " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find ugi of client set_ugi() is not successful, " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
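
For orientation, a non-normative fragment showing the client-side configuration that open() consults; the host, paths, and values are placeholders, and setting the keys through plain Configuration.set on the ConfVars names is only one way to populate them.

    // Sketch: point the client at a metastore over SSL before constructing it.
    Configuration conf = MetastoreConf.newMetastoreConf();
    conf.set(ConfVars.THRIFT_URIS.getVarname(), "thrift://hms-host.example.com:9083");
    conf.set(ConfVars.USE_SSL.getVarname(), "true");
    conf.set(ConfVars.SSL_TRUSTSTORE_PATH.getVarname(), "/path/to/truststore.jks");
    conf.set(ConfVars.CLIENT_SOCKET_TIMEOUT.getVarname(), "600s");
    IMetaStoreClient msc = new HiveMetaStoreClient(conf);  // constructor runs the retry loop above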
+ 
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+   }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // Transport would have been closed via client.shutdown(), so we don't need this, but
+     // just in case, we make this call.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   @Override
+   public void createCatalog(Catalog catalog) throws TException {
+     client.create_catalog(new CreateCatalogRequest(catalog));
+   }
+ 
+   @Override
+   public void alterCatalog(String catalogName, Catalog newCatalog) throws TException {
+     client.alter_catalog(new AlterCatalogRequest(catalogName, newCatalog));
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catName) throws TException {
+     GetCatalogResponse rsp = client.get_catalog(new GetCatalogRequest(catName));
+     return rsp == null ? null : filterHook.filterCatalog(rsp.getCatalog());
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws TException {
+     GetCatalogsResponse rsp = client.get_catalogs();
+     return rsp == null ? null : filterHook.filterCatalogs(rsp.getNames());
+   }
+ 
+   @Override
+   public void dropCatalog(String catName) throws TException {
+     client.drop_catalog(new DropCatalogRequest(catName));
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     if (new_part != null && !new_part.isSetCatName()) {
+       new_part.setCatName(getDefaultCatalog(conf));
+     }
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     if (new_parts == null || new_parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (!new_parts.isEmpty() && !new_parts.get(0).isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       new_parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts == null || parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     // Have to set it for each partition too
+     if (!part.isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf));
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     if (partitionSpec == null) {
+       throw new MetaException("PartitionSpec cannot be null.");
+     }
+     if (partitionSpec.getCatName() == null) {
+       partitionSpec.setCatName(getDefaultCatalog(conf));
+     }
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    String name) throws TException {
+     Partition p = client.append_partition_by_name(prependCatalogToDbName(
+         catName, dbName, conf), tableName, name);
+     return deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    List<String> partVals) throws TException {
+     Partition p = client.append_partition(prependCatalogToDbName(
+         catName, dbName, conf), tableName, partVals);
+     return deepCopy(p);
+   }
+ 
+   @Deprecated
+   public Partition appendPartition(String dbName, String tableName, List<String> partVals,
+                                    EnvironmentContext ec) throws TException {
+     return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partVals, ec).deepCopy();
+   }
+ 
+   /**
+    * Exchange a partition between two tables.
+    * @param partitionSpecs partition specs of the parent partition to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partition after exchanging
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destDb, String destTableName) throws TException {
+     return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
+ 
+   /**
+    * Exchange the partitions between two tables.
+    * @param partitionSpecs partition specs of the parent partition to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partitions after exchanging
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames, List<String> colNames,
++      long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
++        partNames, colNames, txnId, validWriteIdList);
++  }
++
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String catName, String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
++        partNames);
++    rqst.setCatName(catName);
++    rqst.setTxnId(txnId);
++    rqst.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(rqst).getPartStats();
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
++        partNames, txnId, writeIdList);
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    if (colNames.isEmpty() || partNames.isEmpty()) {
++      LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
++      return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
++    }
++    PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    return client.get_aggr_stats_for(req);
++  }
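
A hedged usage fragment for the new stats overloads above; `msc`, the db/table/column/partition names, and the `txnId`/`writeIds` values are placeholders, not part of the patch.

    // Sketch: aggregate column stats for two partitions, consistent with a writer snapshot.
    List<String> cols = Arrays.asList("id", "name");
    List<String> parts = Arrays.asList("ds=2018-07-01", "ds=2018-07-02");
    AggrStats aggr = msc.getAggrColStatsFor("db1", "t1", cols, parts, txnId, writeIds);
    System.out.println("Stats aggregated over " + aggr.getPartsFound() + " partitions");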
++
++  @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                              String sourceDb, String sourceTable, String destCat,
+                                              String destDb, String destTableName) throws TException {
+     return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
+ 
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     if (!db.isSetCatalogName()) {
+       db.setCatalogName(getDefaultCatalog(conf));
+     }
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     if (!tbl.isSetCatName()) {
+       tbl.setCatName(getDefaultCatalog(conf));
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e){
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+ 
+     if (!tbl.isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       tbl.setCatName(defaultCat);
+       if (primaryKeys != null) {
+         primaryKeys.forEach(pk -> pk.setCatName(defaultCat));
+       }
+       if (foreignKeys != null) {
+         foreignKeys.forEach(fk -> fk.setCatName(defaultCat));
+       }
+       if (uniqueConstraints != null) {
+         uniqueConstraints.forEach(uc -> uc.setCatName(defaultCat));
+       }
+       if (notNullConstraints != null) {
+         notNullConstraints.forEach(nn -> nn.setCatName(defaultCat));
+       }
+       if (defaultConstraints != null) {
+         defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+       }
+       if (checkConstraints != null) {
+         checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+       }
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName)
+       throws TException {
+     dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName);
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName, String constraintName)
+       throws TException {
+     DropConstraintRequest rqst = new DropConstraintRequest(dbName, tableName, constraintName);
+     rqst.setCatName(catName);
+     client.drop_constraint(rqst);
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws TException {
+     if (!primaryKeyCols.isEmpty() && !primaryKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       primaryKeyCols.forEach(pk -> pk.setCatName(defaultCat));
+     }
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws TException {
+     if (!foreignKeyCols.isEmpty() && !foreignKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       foreignKeyCols.forEach(fk -> fk.setCatName(defaultCat));
+     }
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!uniqueConstraintCols.isEmpty() && !uniqueConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       uniqueConstraintCols.forEach(uc -> uc.setCatName(defaultCat));
+     }
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!notNullConstraintCols.isEmpty() && !notNullConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       notNullConstraintCols.forEach(nn -> nn.setCatName(defaultCat));
+     }
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+     }
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+     }
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
+   /**
+    * @param type
+    * @return true or false
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, cascade);
+   }
+ 
+   @Override
+   public void dropDatabase(String catalogName, String dbName, boolean deleteData,
+                            boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(catalogName, dbName);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     String dbNameWithCatalog = prependCatalogToDbName(catalogName, dbName, conf);
+ 
+     if (cascade) {
+       // Note that this logic may drop some of the tables of the database
+       // even if the drop database fails for any reason
+       // TODO: Fix this
+       List<String> materializedViews = getTables(dbName, ".*", TableType.MATERIALIZED_VIEW);
+       for (String table : materializedViews) {
+         // First we delete the materialized views
+         dropTable(dbName, table, deleteData, true);
+       }
+ 
+       /**
+        * When dropping db cascade, client side hooks have to be called at each table removal.
+        * If {@link org.apache.hadoop.hive.metastore.conf.MetastoreConf#ConfVars.BATCH_RETRIEVE_MAX
+        * BATCH_RETRIEVE_MAX} is less than the number of tables in the DB, we'll have to call the
+        * hooks one by one each alongside with a
+        * {@link #dropTable(String, String, boolean, boolean, EnvironmentContext) dropTable} call to
+        * ensure transactionality.
+        */
+       List<String> tableNameList = getAllTables(dbName);
+       int tableCount = tableNameList.size();
+       int maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.BATCH_RETRIEVE_MAX);
+       LOG.debug("Selecting dropDatabase method for " + dbName + " (" + tableCount + " tables), " +
+              ConfVars.BATCH_RETRIEVE_MAX.getVarname() + "=" + maxBatchSize);
+ 
+       if (tableCount > maxBatchSize) {
+         LOG.debug("Dropping database in a per table batch manner.");
+         dropDatabaseCascadePerTable(catalogName, dbName, tableNameList, deleteData, maxBatchSize);
+       } else {
+         LOG.debug("Dropping database in a per DB manner.");
+         dropDatabaseCascadePerDb(catalogName, dbName, tableNameList, deleteData);
+       }
+ 
+     } else {
+       client.drop_database(dbNameWithCatalog, deleteData, cascade);
+     }
+   }
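
A brief, non-authoritative usage fragment for the cascade path above: lowering BATCH_RETRIEVE_MAX forces the per-table branch for databases with many tables. The `conf`, `msc`, and database name are placeholders.

    // Sketch: drop a database and all of its tables, batching hook-aware drops.
    conf.set(ConfVars.BATCH_RETRIEVE_MAX.getVarname(), "100");
    msc.dropDatabase("hive", "scratch_db", true /* deleteData */, true /* ignoreUnknownDb */, true /* cascade */);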
+ 
+   /**
+    * Handles dropDatabase by invoking drop_table in HMS for each table.
+    * Useful when the table list in the DB is too large to fit in memory. It will retrieve tables in
+    * chunks and, for each table with a drop_table hook, it will invoke drop_table on both HMS and
+    * the hook. This is a time-consuming operation, so hookless tables are skipped and will be dropped
+    * on the server side when the client invokes drop_database.
+    * Note that this is 'less transactional' than dropDatabaseCascadePerDb since we're dropping
+    * table-level objects, so the overall outcome of this method might be a partially dropped DB.
+    * @param catName
+    * @param dbName
+    * @param tableList
+    * @param deleteData
+    * @param maxBatchSize
+    * @throws TException
+    */
+   private void dropDatabaseCascadePerTable(String catName, String dbName, List<String> tableList,
+                                            boolean deleteData, int maxBatchSize) throws TException {
+     String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+     for (Table table : new TableIterable(this, catName, dbName, tableList, maxBatchSize)) {
+       boolean success = false;
+       HiveMetaHook hook = getHook(table);
+       if (hook == null) {
+         continue;
+       }
+       try {
+         hook.preDropTable(table);
+         client.drop_table_with_environment_context(dbNameWithCatalog, table.getTableName(), deleteData, null);
+         hook.commitDropTable(table, deleteData);
+         success = true;
+       } finally {
+         if (!success) {
+           hook.rollbackDropTable(table);
+         }
+       }
+     }
+     client.drop_database(dbNameWithCatalog, deleteData, true);
+   }
+ 
+   /**
+    * Handles dropDatabase by invoking drop_database in HMS.
+    * Useful when the table list in the DB can fit in memory; it will retrieve all tables at once and
+    * call drop_database once. Also handles drop_table hooks.
+    * @param catName
+    * @param dbName
+    * @param tableList
+    * @param deleteData
+    * @throws TException
+    */
+   private void dropDatabaseCascadePerDb(String catName, String dbName, List<String> tableList,
+                                         boolean deleteData) throws TException {
+     String dbNameWithCatalog = prependCatalogToDbName(catName, dbName, conf);
+     List<Table> tables = getTableObjectsByName(catName, dbName, tableList);
+     boolean success = false;
+     try {
+       for (Table table : tables) {
+         HiveMetaHook hook = getHook(table);
+         if (hook == null) {
+           continue;
+         }
+         hook.preDropTable(table);
+       }
+       client.drop_database(dbNameWithCatalog, deleteData, true);
+       for (Table table : tables) {
+         HiveMetaHook hook = getHook(table);
+         if (hook == null) {
+           continue;
+         }
+         hook.commitDropTable(table, deleteData);
+       }
+       success = true;
+     } finally {
+       if (!success) {
+         for (Table table : tables) {
+           HiveMetaHook hook = getHook(table);
+           if (hook == null) {
+             continue;
+           }
+           hook.rollbackDropTable(table);
+         }
+       }
+     }
+   }
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws TException {
+     return dropPartition(getDefaultCatalog(conf), dbName, tableName, partName, deleteData);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name, String name,
+                                boolean deleteData) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, name, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
+ 
+   // A bunch of these are in HiveMetaStoreClient but not IMetaStoreClient.  I have marked these
+   // as deprecated and not updated them for the catalogs.  If we really want to support them we
+   // should add them to IMetaStoreClient.
+ 
+   @Deprecated
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws TException {
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(db_name, conf),
+         tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData,
+                                EnvironmentContext ec) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partName, dropData, ec);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+       throws TException {
+     return client.drop_partition(prependCatalogToDbName(dbName, conf), tableName, partVals, true);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         PartitionDropOptions.instance().deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(catName, db_name, tbl_name, part_vals, PartitionDropOptions.instance()
+             .deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, options);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options)
+       throws TException {
+     if (options == null) {
+       options = PartitionDropOptions.instance();
+     }
+     if (part_vals != null) {
+       for (String partVal : part_vals) {
+         if (partVal == null) {
+           throw new MetaException("The partition value must not be null.");
+         }
+       }
+     }
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, part_vals, options.deleteData,
+         options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null);
+   }
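
A minimal fragment for the catalog-aware dropPartition overload above, using the PartitionDropOptions builder calls that already appear elsewhere in this file; all names and values are illustrative.

    // Sketch: drop one partition by values but keep its data files on the filesystem.
    List<String> vals = Arrays.asList("2018-07-01", "US");
    boolean dropped = msc.dropPartition("hive", "db1", "t1", vals,
        PartitionDropOptions.instance().deleteData(false));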
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options)
+       throws TException {
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, options);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String catName, String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options) throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setCatName(catName);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, ifPurge);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, true, true, null);
+   }
+ 
+   @Override
+   public void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                         boolean ignoreUnknownTable, boolean ifPurge) throws TException {
+     // Build a new EnvironmentContext with ifPurge set.
+     EnvironmentContext envContext = null;
+     if (ifPurge) {
+       Map<String, String> warehouseOptions = new HashMap<>();
+       warehouseOptions.put("ifPurge", "TRUE");
+       envContext = new EnvironmentContext(warehouseOptions);
+     }
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, envContext);
+ 
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String catName, String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(catName, dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(catName, dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success = true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
++  public void truncateTable(String dbName, String tableName, List<String> partNames,
++      long txnId, String validWriteIds, long writeId) throws TException {
++    truncateTableInternal(getDefaultCatalog(conf),
++        dbName, tableName, partNames, txnId, validWriteIds, writeId);
++  }
++
++  @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
 -    truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames);
++    truncateTableInternal(getDefaultCatalog(conf), dbName, tableName, partNames, -1, null, -1);
+   }
+ 
+   @Override
+   public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws TException {
 -    client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames);
++    truncateTableInternal(catName, dbName, tableName, partNames, -1, null, -1);
++  }
++
++  private void truncateTableInternal(String catName, String dbName, String tableName,
++      List<String> partNames, long txnId, String validWriteIds, long writeId)
++          throws MetaException, TException {
++    TruncateTableRequest req = new TruncateTableRequest(
++        prependCatalogToDbName(catName, dbName, conf), tableName);
++    req.setPartNames(partNames);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIds);
++    req.setWriteId(writeId);
++    client.truncate_table_req(req);
+   }
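
A hedged fragment for the new write-id-aware truncateTable overload that funnels into truncateTableInternal above; `msc`, the names, and the `txnId`/`writeIds`/`writeId` values are placeholders a real caller would obtain from its transaction state.

    // Sketch: truncate two partitions of a transactional table within an open transaction.
    List<String> partNames = Arrays.asList("ds=2018-07-01", "ds=2018-07-02");
    msc.truncateTable("db1", "t1", partNames, txnId, writeIds, writeId);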
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory, either by copying or moving them.
+    *
+    * @param request inputs: the path of the data files to be recycled to cmroot, and the
+    *                isPurge flag; when set to true, files which need to be recycled are not moved to Trash
+    * @return Response which is currently void
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
+ 
+   /**
+    * @param type
+    * @return true if the type is dropped
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @return map of types
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       for (String key : fromClient.keySet()) {
+         result.put(key, deepCopy(fromClient.get(key)));
+       }
+     }
+     return result;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String databasePattern) throws TException {
+     return getDatabases(getDefaultCatalog(conf), databasePattern);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String databasePattern) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(
+         catName, databasePattern, conf)));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases() throws TException {
+     return getAllDatabases(getDefaultCatalog(conf));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return listPartitionSpecs(getDefaultCatalog(conf), dbName, tableName, maxParts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                                int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+                                         List<String> part_vals, short max_parts) throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         List<String> part_vals, int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, part_vals, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     short max_parts, String user_name,
+                                                     List<String> group_names) throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, max_parts, user_name,
+         group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     int maxParts, String userName,
+                                                     List<String> groupNames) throws TException {
+     List<Partition> parts = client.get_partitions_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     List<String> part_vals, short max_parts,
+                                                     String user_name, List<String> group_names)
+       throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     List<String> partialPvals, int maxParts,
+                                                     String userName, List<String> groupNames)
+       throws TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws TException {
+     return listPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions_by_filter(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts)
+       throws TException {
+     return listPartitionSpecsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name,
+                                                        String tbl_name, String filter,
+                                                        int max_parts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter,
+             max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+                                       String default_partition_name, short max_parts,
+                                       List<Partition> result) throws TException {
+     return listPartitionsByExpr(getDefaultCatalog(conf), db_name, tbl_name, expr,
+         default_partition_name, max_parts, result);
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, int max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(shrinkMaxtoShort(max_parts));
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+     // TODO: in these methods, do we really need to deepcopy?
+     deepCopyPartitions(r.getPartitions(), result);
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
+ 
+   @Override
+   public Database getDatabase(String name) throws TException {
+     return getDatabase(getDefaultCatalog(conf), name);
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String databaseName) throws TException {
+     Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf));
+     return deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   @Override
+   public Partition getPartition(String db_name, String tbl_name, List<String> part_vals)
+       throws TException {
+     return getPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName,
+                                 List<String> partVals) throws TException {
+     Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws TException {
+     return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                               List<String> part_names) throws TException {
+     List<Partition> parts =
+         client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf), tbl_name, part_names);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     if (!request.isSetCatName()) {
+       request.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_partition_values(request);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws TException {
+     return getPartitionWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                             List<String> pvals, String userName,
+                                             List<String> groupNames) throws TException {
+     Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName,
+         pvals, userName, groupNames);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public Table getTable(String dbname, String name) throws TException {
+     return getTable(getDefaultCatalog(conf), dbname, name);
+   }
+ 
+   @Override
++  public Table getTable(String dbname, String name,
++                 long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    return getTable(getDefaultCatalog(conf), dbname, name,
++        txnId, validWriteIdList);
++  }
++
++  @Override
+   public Table getTable(String catName, String dbName, String tableName) throws TException {
+     GetTableRequest req = new GetTableRequest(dbName, tableName);
+     req.setCatName(catName);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return deepCopy(filterHook.filterTable(t));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++    long txnId, String validWriteIdList) throws TException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCatName(catName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return deepCopy(filterHook.filterTable(t));
++  }
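
A short, assumption-laden fragment for the snapshot-consistent getTable overload above; `msc`, the names, and the `txnId`/`writeIds` values are placeholders.

    // Sketch: fetch the table definition the reader's snapshot is allowed to see.
    Table snapshot = msc.getTable("hive", "db1", "t1", txnId, writeIds);
    System.out.println("Fetched " + snapshot.getTableName() + " owned by " + snapshot.getOwner());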
++
++  @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws TException {
+     return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName,
+                                            List<String> tableNames) throws TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setCatName(catName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   @Override
+   public Materialization getMaterializationInvalidationInfo(CreationMetadata cm, String validTxnList)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(cm, validTxnList);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(getDefaultCatalog(conf), dbName, tableName, cm);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbName, String tableName,
+                                      CreationMetadata cm) throws MetaException, TException {
+     client.update_creation_metadata(catName, dbName, tableName, cm);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException {
+     return listTableNamesByFilter(getDefaultCatalog(conf), dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              int maxTables) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter,
+             shrinkMaxtoShort(maxTables)));
+   }
+ 
+   /**
+    * @param name name of the type to look up
+    * @return the type with that name
+    * @throws MetaException if the metastore hits an internal error
+    * @throws TException if the Thrift call fails
+    * @throws NoSuchObjectException if no type with the given name exists
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern)
+       throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern, tableType);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern,
+                                 TableType tableType) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern,
+             tableType.toString()));
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbName) throws TException {
+     return getMaterializedViewsForRewriting(getDefaultCatalog(conf), dbName);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbname)
+       throws MetaException {
+     try {
+       return filterHook.filterTableNames(catName, dbname,
+           client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf)));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return getTableMeta(getDefaultCatalog(conf), dbPatterns, tablePatterns, tableTypes);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                       List<String> tableTypes) throws TException {
+     return filterHook.filterTableMetas(client.get_table_meta(prependCatalogToDbName(
+         catName, dbPatterns, conf), tablePatterns, tableTypes));
+   }
+ 
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return getAllTables(getDefaultCatalog(conf), dbname);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws TException {
+     return filterHook.filterTableNames(catName, dbName, client.get_all_tables(
+         prependCatalogToDbName(catName, dbName, conf)));
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws TException {
+     return tableExists(getDefaultCatalog(conf), databaseName, tableName);
+   }
+ 
+   @Override
+   public boolean tableExists(String catName, String dbName, String tableName) throws TException {
+     try {
+       GetTableRequest req = new GetTableRequest(dbName, tableName);
+       req.setCatName(catName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return listPartitionNames(getDefaultCatalog(conf), dbName, tblName, max);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName,
+                                          int maxParts) throws TException {
+     return filterHook.filterPartitionNames(catName, dbName, tableName,
+         client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws TException {
+     return listPartitionNames(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                          List<String> part_vals, int max_parts) throws TException {
+     return filterHook.filterPartitionNames(catName, db_name, tbl_name,
+         client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name,
+             part_vals, shrinkMaxtoShort(max_parts)));
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws TException {
+     return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                       String filter) throws TException {
+     return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName,
+         filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, null);
+   }
+ 
+   @Override
 -  public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
++  public void alter_partition(String dbName, String tblName, Partition newPart,
++      EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
+                               EnvironmentContext environmentContext) throws TException {
 -    client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName,
 -        newPart, environmentContext);
++    AlterPartitionsRequest req = new AlterPartitionsRequest(dbName, tblName, Lists.newArrayList(newPart));
++    req.setCatName(catName);
++    req.setEnvironmentContext(environmentContext);
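++    // The single-partition alter is sent through the batched alter_partitions_req endpoint,
++    // with the catalog name and environment context carried in the request object.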
++    client.alter_partitions_req(req);
+   }
+ 
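++  /**
++   * Alters a single partition, forwarding the caller's transaction id and write-id list with
++   * the request alongside the environment context.
++   */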
+   @Override
++  public void alter_partition(String dbName, String tblName, Partition newPart,
++      EnvironmentContext environmentContext, long txnId, String writeIdList)
++      throws InvalidOperationException, MetaException, TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest(
++        dbName, tblName, Lists.newArrayList(newPart));
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    client.alter_partitions_req(req);
++  }
++
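++  /**
++   * Deprecated form; forwards to the transactional overload with an empty EnvironmentContext
++   * and -1/null sentinels for the transaction and write-id arguments.
++   */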
++  @Deprecated
++  @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
++    alter_partitions(
++        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+                                EnvironmentContext environmentContext) throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentCo

<TRUNCATED>