Posted to commits@hive.apache.org by se...@apache.org on 2018/07/19 21:45:07 UTC
[29/54] [abbrv] hive git commit: HIVE-19416 : merge master into
branch (Sergey Shelukhin) 0719
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
index 0000000,fb4a761..fe64a91
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@@ -1,0 -1,471 +1,472 @@@
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ package org.apache.hadoop.hive.metastore;
+
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotSame;
+ import static org.junit.Assert.assertTrue;
+
+ import com.google.common.collect.Lists;
+
+ import org.junit.experimental.categories.Category;
+
+ /**
+ * TestMetaStoreEventListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
+ * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
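+ *
+ * <p>For orientation, a minimal sketch of a listener that could be registered through the
+ * "hive.metastore.event.listeners" property used in {@code setUp()} below. The class name is
+ * hypothetical and the snippet is illustrative only, not part of this patch:
+ * <pre>
+ * public class LoggingListener extends MetaStoreEventListener {
+ *   public LoggingListener(org.apache.hadoop.conf.Configuration conf) {
+ *     super(conf);
+ *   }
+ *   &#64;Override
+ *   public void onCreateTable(CreateTableEvent tableEvent) {
+ *     // Invoked after a table is created; the event carries the new table object.
+ *     System.out.println("Created table: " + tableEvent.getTable().getTableName());
+ *   }
+ * }
+ * </pre>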
+ */
+ @Category(MetastoreUnitTest.class)
+ public class TestMetaStoreEventListener {
+ private Configuration conf;
+ private HiveMetaStoreClient msc;
+
+ private static final String dbName = "hive2038";
+ private static final String tblName = "tmptbl";
+ private static final String renamed = "tmptbl2";
+ private static final String metaConfKey = "metastore.partition.name.whitelist.pattern";
+ private static final String metaConfVal = "";
+
+ @Before
+ public void setUp() throws Exception {
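+ // Register dummy listeners that record every event and pre-event in static lists,
+ // which the assertions below inspect.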
+ System.setProperty("hive.metastore.event.listeners",
+ DummyListener.class.getName());
+ System.setProperty("hive.metastore.pre.event.listeners",
+ DummyPreListener.class.getName());
+
+ conf = MetastoreConf.newMetastoreConf();
+
+ MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+
+ msc = new HiveMetaStoreClient(conf);
+
+ msc.dropDatabase(dbName, true, true, true);
+ DummyListener.notifyList.clear();
+ DummyPreListener.notifyList.clear();
+ }
+
+ private void validateCreateDb(Database expectedDb, Database actualDb) {
+ assertEquals(expectedDb.getName(), actualDb.getName());
+ assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
+ }
+
+ private void validateTable(Table expectedTable, Table actualTable) {
+ assertEquals(expectedTable.getTableName(), actualTable.getTableName());
+ assertEquals(expectedTable.getDbName(), actualTable.getDbName());
+ assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
+ }
+
+ private void validateCreateTable(Table expectedTable, Table actualTable) {
+ validateTable(expectedTable, actualTable);
+ }
+
+ private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
+ assertEquals(expectedPartition, actualPartition);
+ }
+
+ private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
+ assertEquals(expectedTable, actualTable);
+ }
+
+ private void validatePartition(Partition expectedPartition, Partition actualPartition) {
+ assertEquals(expectedPartition.getValues(), actualPartition.getValues());
+ assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
+ assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
+ }
+
+ private void validateAlterPartition(Partition expectedOldPartition,
+ Partition expectedNewPartition, String actualOldPartitionDbName,
+ String actualOldPartitionTblName, List<String> actualOldPartitionValues,
+ Partition actualNewPartition) {
+ assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
+ assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
+ assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
+
+ validatePartition(expectedNewPartition, actualNewPartition);
+ }
+
+ private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
+ Table actualOldTable, Table actualNewTable) {
+ validateTable(expectedOldTable, actualOldTable);
+ validateTable(expectedNewTable, actualNewTable);
+ }
+
+ private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
+ Table actualOldTable, Table actualNewTable) {
+ validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
+
+ assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
+ assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
+ }
+
+ private void validateLoadPartitionDone(String expectedTableName,
+ Map<String,String> expectedPartitionName, String actualTableName,
+ Map<String,String> actualPartitionName) {
+ assertEquals(expectedPartitionName, actualPartitionName);
+ assertEquals(expectedTableName, actualTableName);
+ }
+
+ private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
+ while (expectedPartitions.hasNext()){
+ assertTrue(actualPartitions.hasNext());
+ validatePartition(expectedPartitions.next(), actualPartitions.next());
+ }
+ assertFalse(actualPartitions.hasNext());
+ }
+
+ private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
+ validateTable(expectedTable, actualTable);
+ }
+
+ private void validateDropTable(Table expectedTable, Table actualTable) {
+ validateTable(expectedTable, actualTable);
+ }
+
+ private void validateDropDb(Database expectedDb, Database actualDb) {
+ assertEquals(expectedDb, actualDb);
+ }
+
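+ /**
+ * Walks a database, table, and partition through their full DDL lifecycle and checks that
+ * each metastore operation fires the matching pre- and post-event with the expected payload.
+ */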
+ @Test
+ public void testListener() throws Exception {
+ int listSize = 0;
+
+ List<ListenerEvent> notifyList = DummyListener.notifyList;
+ List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+ assertEquals(notifyList.size(), listSize);
+ assertEquals(preNotifyList.size(), listSize);
+
+ new DatabaseBuilder()
+ .setName(dbName)
+ .create(msc, conf);
+ listSize++;
+ PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ Database db = msc.getDatabase(dbName);
+ assertEquals(listSize, notifyList.size());
+ assertEquals(listSize + 1, preNotifyList.size());
+ validateCreateDb(db, preDbEvent.getDatabase());
+
+ CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+ Assert.assertTrue(dbEvent.getStatus());
+ validateCreateDb(db, dbEvent.getDatabase());
+
+ Table table = new TableBuilder()
+ .inDb(db)
+ .setTableName(tblName)
+ .addCol("a", "string")
+ .addPartCol("b", "string")
+ .create(msc, conf);
+ PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ listSize++;
+ Table tbl = msc.getTable(dbName, tblName);
+ validateCreateTable(tbl, preTblEvent.getTable());
+ assertEquals(notifyList.size(), listSize);
+
+ CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+ Assert.assertTrue(tblEvent.getStatus());
+ validateCreateTable(tbl, tblEvent.getTable());
+
+
+ new PartitionBuilder()
+ .inTable(table)
+ .addValue("2011")
+ .addToTable(msc, conf);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+
+ AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ Assert.assertTrue(partEvent.getStatus());
+ Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
+ Partition partAdded = partEvent.getPartitionIterator().next();
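++ // Align the write ID with the fetched partition so the equality check below covers the remaining fields.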
++ partAdded.setWriteId(part.getWriteId());
+ validateAddPartition(part, partAdded);
+ validateTableInAddPartition(tbl, partEvent.getTable());
+ validateAddPartition(part, prePartEvent.getPartitions().get(0));
+
+ // Test adding multiple partitions in a single partition-set, atomically.
+ int currentTime = (int) (System.currentTimeMillis() / 1000);
+ HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
+ table = hmsClient.getTable(dbName, "tmptbl");
+ Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+ ++listSize;
+ AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
+ List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+ assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+ assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+ assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+ assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
+
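+ // Alter the partition in place; this should fire both PreAlterPartitionEvent and AlterPartitionEvent.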
+ part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
+ msc.alter_partition(dbName, tblName, part);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAlterPartitionEvent preAlterPartEvent =
+ (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ // The partition did not change, so the new partition should match the original one.
+ Partition origP = msc.getPartition(dbName, tblName, "b=2011");
+
+ AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(alterPartEvent.getStatus());
+ validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
+ alterPartEvent.getOldPartition().getTableName(),
+ alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
+
+
+ validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
+ preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
+ preAlterPartEvent.getNewPartition());
+
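+ // appendPartition should surface as AddPartitionEvent/PreAddPartitionEvent, just like an explicit add.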
+ List<String> part_vals = new ArrayList<>();
+ part_vals.add("c=2012");
+ int preEventListSize;
+ preEventListSize = preNotifyList.size() + 1;
+ Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
+
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ assertEquals(preNotifyList.size(), preEventListSize);
+
+ AddPartitionEvent appendPartEvent =
+ (AddPartitionEvent)(notifyList.get(listSize-1));
+ Partition partAppended = appendPartEvent.getPartitionIterator().next();
+ validateAddPartition(newPart, partAppended);
+
+ PreAddPartitionEvent preAppendPartEvent =
+ (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
+
+ Table renamedTable = new Table(table);
+ renamedTable.setTableName(renamed);
+ msc.alter_table(dbName, tblName, renamedTable);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+ renamedTable = msc.getTable(dbName, renamed);
+
+ AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+ Assert.assertTrue(alterTableE.getStatus());
+ validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+ validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
+ preAlterTableE.getNewTable());
+
+ //change the table name back
+ table = new Table(renamedTable);
+ table.setTableName(tblName);
+ msc.alter_table(dbName, renamed, table);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ table = msc.getTable(dbName, tblName);
+ table.getSd().addToCols(new FieldSchema("c", "int", ""));
+ msc.alter_table(dbName, tblName, table);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+ Table altTable = msc.getTable(dbName, tblName);
+
+ alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+ Assert.assertTrue(alterTableE.getStatus());
+ validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+ validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
+ preAlterTableE.getNewTable());
+
+ Map<String,String> kvs = new HashMap<>(1);
+ kvs.put("b", "2011");
+ msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(partMarkEvent.getStatus());
+ validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
+ partMarkEvent.getPartitionName());
+
+ PreLoadPartitionDoneEvent prePartMarkEvent =
+ (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
+ validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
+ prePartMarkEvent.getPartitionName());
+
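+ // Drop the partition, table, and database in turn; each drop should fire its own pre/post event pair.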
+ msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
+ .size() - 1);
+
+ DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(dropPart.getStatus());
+ validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
+ validateTableInDropPartition(tbl, dropPart.getTable());
+
+ validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
+ validateTableInDropPartition(tbl, preDropPart.getTable());
+
+ msc.dropTable(dbName, tblName);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
+ Assert.assertTrue(dropTbl.getStatus());
+ validateDropTable(tbl, dropTbl.getTable());
+ validateDropTable(tbl, preDropTbl.getTable());
+
+ msc.dropDatabase(dbName);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+ Assert.assertTrue(dropDB.getStatus());
+ validateDropDb(db, dropDB.getDatabase());
+ validateDropDb(db, preDropDB.getDatabase());
+
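+ // Changing a meta-conf value through the client should fire a ConfigChangeEvent with old and new values.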
+ msc.setMetaConf("metastore.try.direct.sql", "false");
+ ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
+ assertEquals("metastore.try.direct.sql", event.getKey());
+ assertEquals("true", event.getOldValue());
+ assertEquals("false", event.getNewValue());
+ }
+
+ @Test
+ public void testMetaConfNotifyListenersClosingClient() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), metaConfVal);
+ assertEquals(event.getNewValue(), "[test pattern modified]");
+ closingClient.close();
+
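+ // Closing the client reverts its session meta-conf asynchronously; give the server a moment,
+ // then expect a second ConfigChangeEvent restoring the original value.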
+ Thread.sleep(2 * 1000);
+
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), "[test pattern modified]");
+ assertEquals(event.getNewValue(), metaConfVal);
+ }
+
+ @Test
+ public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+ HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+ nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), metaConfVal);
+ assertEquals(event.getNewValue(), "[test pattern modified]");
+ // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+ nonClosingClient.getTTransport().close();
+
+ Thread.sleep(2 * 1000);
+
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), "[test pattern modified]");
+ assertEquals(event.getNewValue(), metaConfVal);
+ }
+
+ @Test
+ public void testMetaConfDuplicateNotification() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, metaConfVal);
+ int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+ closingClient.close();
+
+ Thread.sleep(2 * 1000);
+
+ int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+ // Setting key to same value, should not trigger configChange event during shutdown
+ assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+ }
+
+ @Test
+ public void testMetaConfSameHandler() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+ IHMSHandler beforeHandler = event.getIHMSHandler();
+ closingClient.close();
+
+ Thread.sleep(2 * 1000);
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+ IHMSHandler afterHandler = event.getIHMSHandler();
+ // Meta-conf cleanup on close should trigger an event to the listener
+ assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+ // Both events should report the same handler
+ assertEquals(beforeHandler, afterHandler);
+ }
+ }
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 0000000,833e2bd..995271a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@@ -1,0 -1,904 +1,904 @@@
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.hadoop.hive.metastore;
+
+ import com.codahale.metrics.Counter;
+ import com.google.common.base.Supplier;
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectPrivilegeBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectRefBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PrivilegeGrantInfoBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.junit.Assert;
+ import org.junit.Assume;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
+ import javax.jdo.Query;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ import java.util.concurrent.BrokenBarrierException;
+ import java.util.concurrent.CyclicBarrier;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.TimeUnit;
+
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
+ @Category(MetastoreUnitTest.class)
+ public class TestObjectStore {
+ private ObjectStore objectStore = null;
+ private Configuration conf;
+
+ private static final String DB1 = "testobjectstoredb1";
+ private static final String DB2 = "testobjectstoredb2";
+ private static final String TABLE1 = "testobjectstoretable1";
+ private static final String KEY1 = "testobjectstorekey1";
+ private static final String KEY2 = "testobjectstorekey2";
+ private static final String OWNER = "testobjectstoreowner";
+ private static final String USER1 = "testobjectstoreuser1";
+ private static final String ROLE1 = "testobjectstorerole1";
+ private static final String ROLE2 = "testobjectstorerole2";
+ private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName());
+
+ private static final class LongSupplier implements Supplier<Long> {
+ public long value = 0;
+
+ @Override
+ public Long get() {
+ return value;
+ }
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+ objectStore = new ObjectStore();
+ objectStore.setConf(conf);
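+ // Start from a clean backend store, then recreate the default catalog that dropAllStoreObjects removes.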
+ dropAllStoreObjects(objectStore);
+ HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+ }
+
+ @Test
+ public void catalogs() throws MetaException, NoSuchObjectException {
+ final String[] names = {"cat1", "cat2"};
+ final String[] locations = {"loc1", "loc2"};
+ final String[] descriptions = {"description 1", "description 2"};
+
+ for (int i = 0; i < names.length; i++) {
+ Catalog cat = new CatalogBuilder()
+ .setName(names[i])
+ .setLocation(locations[i])
+ .setDescription(descriptions[i])
+ .build();
+ objectStore.createCatalog(cat);
+ }
+
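+ // Two catalogs were created above; the built-in default catalog makes the third entry.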
+ List<String> fetchedNames = objectStore.getCatalogs();
+ Assert.assertEquals(3, fetchedNames.size());
+ for (int i = 0; i < names.length; i++) {
+ Assert.assertEquals(names[i], fetchedNames.get(i));
+ Catalog cat = objectStore.getCatalog(fetchedNames.get(i));
+ Assert.assertEquals(names[i], cat.getName());
+ Assert.assertEquals(descriptions[i], cat.getDescription());
+ Assert.assertEquals(locations[i], cat.getLocationUri());
+ }
+ Catalog cat = objectStore.getCatalog(fetchedNames.get(2));
+ Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName());
+ Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription());
+ // Location will vary by system.
+
+ for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]);
+ fetchedNames = objectStore.getCatalogs();
+ Assert.assertEquals(1, fetchedNames.size());
+ }
+
+ @Test(expected = NoSuchObjectException.class)
+ public void getNoSuchCatalog() throws MetaException, NoSuchObjectException {
+ objectStore.getCatalog("no_such_catalog");
+ }
+
+ @Test(expected = NoSuchObjectException.class)
+ public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException {
+ objectStore.dropCatalog("no_such_catalog");
+ }
+
+ // TODO test dropping non-empty catalog
+
+ /**
+ * Test database operations
+ */
+ @Test
+ public void testDatabaseOps() throws MetaException, InvalidObjectException,
+ NoSuchObjectException {
+ String catName = "tdo1_cat";
+ createTestCatalog(catName);
+ Database db1 = new Database(DB1, "description", "locationurl", null);
+ Database db2 = new Database(DB2, "description", "locationurl", null);
+ db1.setCatalogName(catName);
+ db2.setCatalogName(catName);
+ objectStore.createDatabase(db1);
+ objectStore.createDatabase(db2);
+
+ List<String> databases = objectStore.getAllDatabases(catName);
+ LOG.info("databases: " + databases);
+ Assert.assertEquals(2, databases.size());
+ Assert.assertEquals(DB1, databases.get(0));
+ Assert.assertEquals(DB2, databases.get(1));
+
+ objectStore.dropDatabase(catName, DB1);
+ databases = objectStore.getAllDatabases(catName);
+ Assert.assertEquals(1, databases.size());
+ Assert.assertEquals(DB2, databases.get(0));
+
+ objectStore.dropDatabase(catName, DB2);
+ }
+
+ /**
+ * Test table operations
+ */
+ @Test
+ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException,
+ InvalidInputException {
+ Database db1 = new DatabaseBuilder()
+ .setName(DB1)
+ .setDescription("description")
+ .setLocation("locationurl")
+ .build(conf);
+ objectStore.createDatabase(db1);
+ StorageDescriptor sd1 =
+ new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
+ "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+ null, null, null);
+ HashMap<String, String> params = new HashMap<>();
+ params.put("EXTERNAL", "false");
+ Table tbl1 =
+ new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
+ objectStore.createTable(tbl1);
+
+ List<String> tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+ Assert.assertEquals(1, tables.size());
+ Assert.assertEquals(TABLE1, tables.get(0));
+
+ StorageDescriptor sd2 =
+ new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
+ "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+ null, null, null);
+ Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null,
+ "MANAGED_TABLE");
+
+ // Change different fields and verify they were altered
+ newTbl1.setOwner("role1");
+ newTbl1.setOwnerType(PrincipalType.ROLE);
+
- objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1);
++ objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null);
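++ // Merge change: alterTable gained two extra arguments on master (presumably txn/write-id metadata);
++ // -1 and null opt out of any transaction context (assumed semantics).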
+ tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*");
+ Assert.assertEquals(1, tables.size());
+ Assert.assertEquals("new" + TABLE1, tables.get(0));
+
+ // Verify fields were altered during the alterTable operation
+ Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+ Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner());
+ Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType());
+
+ objectStore.createTable(tbl1);
+ tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+ Assert.assertEquals(2, tables.size());
+
+ List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+ Assert.assertEquals(0, foreignKeys.size());
+
+ SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
+ "pk_const_1", false, false, false);
+ pk.setCatName(DEFAULT_CATALOG_NAME);
+ objectStore.addPrimaryKeys(ImmutableList.of(pk));
+ SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col",
+ DB1, "new" + TABLE1, "fk_col", 1,
+ 0, 0, "fk_const_1", "pk_const_1", false, false, false);
+ objectStore.addForeignKeys(ImmutableList.of(fk));
+
+ // Retrieve from PK side
+ foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+ Assert.assertEquals(1, foreignKeys.size());
+
+ List<SQLForeignKey> fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+ if (fks != null) {
+ for (SQLForeignKey fkcol : fks) {
+ objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(),
+ fkcol.getFk_name());
+ }
+ }
+ // Retrieve from FK side
+ foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+ Assert.assertEquals(0, foreignKeys.size());
+ // Retrieve from PK side
+ foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+ Assert.assertEquals(0, foreignKeys.size());
+
+ objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+ tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+ Assert.assertEquals(1, tables.size());
+
+ objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+ tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+ Assert.assertEquals(0, tables.size());
+
+ objectStore.dropDatabase(db1.getCatalogName(), DB1);
+ }
+
+ private StorageDescriptor createFakeSd(String location) {
+ return new StorageDescriptor(null, location, null, null, false, 0,
+ new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+ }
+
+
+ /**
+ * Tests partition operations
+ */
+ @Test
+ public void testPartitionOps() throws MetaException, InvalidObjectException,
+ NoSuchObjectException, InvalidInputException {
+ Database db1 = new DatabaseBuilder()
+ .setName(DB1)
+ .setDescription("description")
+ .setLocation("locationurl")
+ .build(conf);
+ objectStore.createDatabase(db1);
+ StorageDescriptor sd = createFakeSd("location");
+ HashMap<String, String> tableParams = new HashMap<>();
+ tableParams.put("EXTERNAL", "false");
+ FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
+ FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
+ Table tbl1 =
+ new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2),
+ tableParams, null, null, "MANAGED_TABLE");
+ objectStore.createTable(tbl1);
+ HashMap<String, String> partitionParams = new HashMap<>();
+ partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
+ List<String> value1 = Arrays.asList("US", "CA");
+ Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
+ part1.setCatName(DEFAULT_CATALOG_NAME);
+ objectStore.addPartition(part1);
+ List<String> value2 = Arrays.asList("US", "MA");
+ Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
+ part2.setCatName(DEFAULT_CATALOG_NAME);
+ objectStore.addPartition(part2);
+
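+ // ObjectStore guards store reads with Deadline timers; a timer must be running before fetching partitions.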
+ Deadline.startTimer("getPartition");
+ List<Partition> partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+ Assert.assertEquals(2, partitions.size());
+ Assert.assertEquals(111, partitions.get(0).getCreateTime());
+ Assert.assertEquals(222, partitions.get(1).getCreateTime());
+
+ int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "");
+ Assert.assertEquals(partitions.size(), numPartitions);
+
+ numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"");
+ Assert.assertEquals(2, numPartitions);
+
+ objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1);
+ partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+ Assert.assertEquals(1, partitions.size());
+ Assert.assertEquals(222, partitions.get(0).getCreateTime());
+
+ objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2);
+ objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+ objectStore.dropDatabase(db1.getCatalogName(), DB1);
+ }
+
+ /**
+ * Checks if the JDO cache is able to handle directSQL partition drops in one session.
+ * @throws MetaException
+ * @throws InvalidObjectException
+ * @throws NoSuchObjectException
+ * @throws InvalidInputException
+ */
+ @Test
+ public void testDirectSQLDropPartitionsCacheInSession()
+ throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+ createPartitionedTable(false, false);
+ // query the partitions with JDO
+ Deadline.startTimer("getPartition");
+ List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ 10, false, true);
+ Assert.assertEquals(3, partitions.size());
+
+ // drop partitions with directSql
+ objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+
+ // query the partitions with JDO, checking the cache is not causing any problem
+ partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ 10, false, true);
+ Assert.assertEquals(1, partitions.size());
+ }
+
+ /**
+ * Checks if the JDO cache is able to handle directSQL partition drops cross sessions.
+ * @throws MetaException
+ * @throws InvalidObjectException
+ * @throws NoSuchObjectException
+ * @throws InvalidInputException
+ */
+ @Test
+ public void testDirectSQLDropPartitionsCacheCrossSession()
+ throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+ ObjectStore objectStore2 = new ObjectStore();
+ objectStore2.setConf(conf);
+
+ createPartitionedTable(false, false);
+ // query the partitions with JDO in the 1st session
+ Deadline.startTimer("getPartition");
+ List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ 10, false, true);
+ Assert.assertEquals(3, partitions.size());
+
+ // query the partitions with JDO in the 2nd session
+ partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10,
+ false, true);
+ Assert.assertEquals(3, partitions.size());
+
+ // drop partitions with directSql in the 1st session
+ objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+
+ // query the partitions with JDO in the 2nd session, checking the cache is not causing any
+ // problem
+ partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ 10, false, true);
+ Assert.assertEquals(1, partitions.size());
+ }
+
+ /**
+ * Checks that the directSQL partition drop removes all connected data from the RDBMS tables.
+ * @throws MetaException
+ * @throws InvalidObjectException
+ * @throws NoSuchObjectException
+ * @throws SQLException
+ */
+ @Test
+ public void testDirectSQLDropParitionsCleanup() throws MetaException, InvalidObjectException,
+ NoSuchObjectException, SQLException, InvalidInputException {
+
+ createPartitionedTable(true, true);
+
+ // Check that every table is in the expected state before the drop
+ checkBackendTableSize("PARTITIONS", 3);
+ checkBackendTableSize("PART_PRIVS", 3);
+ checkBackendTableSize("PART_COL_PRIVS", 3);
+ checkBackendTableSize("PART_COL_STATS", 3);
+ checkBackendTableSize("PARTITION_PARAMS", 3);
+ checkBackendTableSize("PARTITION_KEY_VALS", 3);
+ checkBackendTableSize("SD_PARAMS", 3);
+ checkBackendTableSize("BUCKETING_COLS", 3);
+ checkBackendTableSize("SKEWED_COL_NAMES", 3);
+ checkBackendTableSize("SDS", 4); // Table has an SDS
+ checkBackendTableSize("SORT_COLS", 3);
+ checkBackendTableSize("SERDE_PARAMS", 3);
+ checkBackendTableSize("SERDES", 4); // Table has a serde
+
+ // drop the partitions
+ Deadline.startTimer("dropPartitions");
+ objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+ Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false);
+
+ // Check that all data connected to the partitions has been dropped
+ checkBackendTableSize("PARTITIONS", 0);
+ checkBackendTableSize("PART_PRIVS", 0);
+ checkBackendTableSize("PART_COL_PRIVS", 0);
+ checkBackendTableSize("PART_COL_STATS", 0);
+ checkBackendTableSize("PARTITION_PARAMS", 0);
+ checkBackendTableSize("PARTITION_KEY_VALS", 0);
+ checkBackendTableSize("SD_PARAMS", 0);
+ checkBackendTableSize("BUCKETING_COLS", 0);
+ checkBackendTableSize("SKEWED_COL_NAMES", 0);
+ checkBackendTableSize("SDS", 1); // Table has an SDS
+ checkBackendTableSize("SORT_COLS", 0);
+ checkBackendTableSize("SERDE_PARAMS", 0);
+ checkBackendTableSize("SERDES", 1); // Table has a serde
+ }
+
+ /**
+ * Creates the DB1 database and the TABLE1 table with 3 partitions.
+ * @param withPrivileges Should we create privileges as well?
+ * @param withStatistics Should we create statistics as well?
+ * @throws MetaException
+ * @throws InvalidObjectException
+ */
+ private void createPartitionedTable(boolean withPrivileges, boolean withStatistics)
+ throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+ Database db1 = new DatabaseBuilder()
+ .setName(DB1)
+ .setDescription("description")
+ .setLocation("locationurl")
+ .build(conf);
+ objectStore.createDatabase(db1);
+ Table tbl1 =
+ new TableBuilder()
+ .setDbName(DB1)
+ .setTableName(TABLE1)
+ .addCol("test_col1", "int")
+ .addCol("test_col2", "int")
+ .addPartCol("test_part_col", "int")
+ .addCol("test_bucket_col", "int", "test bucket col comment")
+ .addCol("test_skewed_col", "int", "test skewed col comment")
+ .addCol("test_sort_col", "int", "test sort col comment")
+ .build(conf);
+ objectStore.createTable(tbl1);
+
+ PrivilegeBag privilegeBag = new PrivilegeBag();
+ // Create partitions for the partitioned table
+ for(int i=0; i < 3; i++) {
+ Partition part = new PartitionBuilder()
+ .inTable(tbl1)
+ .addValue("a" + i)
+ .addSerdeParam("serdeParam", "serdeParamValue")
+ .addStorageDescriptorParam("sdParam", "sdParamValue")
+ .addBucketCol("test_bucket_col")
+ .addSkewedColName("test_skewed_col")
+ .addSortCol("test_sort_col", 1)
+ .build(conf);
+ objectStore.addPartition(part);
+
+ if (withPrivileges) {
+ HiveObjectRef partitionReference = new HiveObjectRefBuilder().buildPartitionReference(part);
+ HiveObjectRef partitionColumnReference = new HiveObjectRefBuilder()
+ .buildPartitionColumnReference(tbl1, "test_part_col", part.getValues());
+ PrivilegeGrantInfo privilegeGrantInfo = new PrivilegeGrantInfoBuilder()
+ .setPrivilege("a")
+ .build();
+ HiveObjectPrivilege partitionPriv = new HiveObjectPrivilegeBuilder()
+ .setHiveObjectRef(partitionReference)
+ .setPrincipleName("a")
+ .setPrincipalType(PrincipalType.USER)
+ .setGrantInfo(privilegeGrantInfo)
+ .build();
+ privilegeBag.addToPrivileges(partitionPriv);
+ HiveObjectPrivilege partitionColPriv = new HiveObjectPrivilegeBuilder()
+ .setHiveObjectRef(partitionColumnReference)
+ .setPrincipleName("a")
+ .setPrincipalType(PrincipalType.USER)
+ .setGrantInfo(privilegeGrantInfo)
+ .build();
+ privilegeBag.addToPrivileges(partitionColPriv);
+ }
+
+ if (withStatistics) {
+ ColumnStatistics stats = new ColumnStatistics();
+ ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+ desc.setCatName(tbl1.getCatName());
+ desc.setDbName(tbl1.getDbName());
+ desc.setTableName(tbl1.getTableName());
+ desc.setPartName("test_part_col=a" + i);
+ stats.setStatsDesc(desc);
+
+ List<ColumnStatisticsObj> statsObjList = new ArrayList<>(1);
+ stats.setStatsObj(statsObjList);
+
+ ColumnStatisticsData data = new ColumnStatisticsData();
+ BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+ boolStats.setNumTrues(0);
+ boolStats.setNumFalses(0);
+ boolStats.setNumNulls(0);
+ data.setBooleanStats(boolStats);
+
+ ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
+ statsObjList.add(partStats);
+
- objectStore.updatePartitionColumnStatistics(stats, part.getValues());
++ objectStore.updatePartitionColumnStatistics(stats, part.getValues(), -1, null, -1);
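++ // Merge change: the stats update also takes extra arguments (presumably txn/write-id metadata);
++ // -1 and null indicate no transaction context (assumed semantics).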
+ }
+ }
+ if (withPrivileges) {
+ objectStore.grantPrivileges(privilegeBag);
+ }
+ }
+
+ /**
+ * Checks that the row count of the given HMS backend table is as expected. If it is not, an
+ * {@link AssertionError} is thrown.
+ * @param tableName The table in which we count the rows
+ * @param size The expected row number
+ * @throws SQLException If there is a problem connecting to / querying the backend DB
+ */
+ private void checkBackendTableSize(String tableName, int size) throws SQLException {
+ String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+ Connection conn = DriverManager.getConnection(connectionStr);
+ Statement stmt = conn.createStatement();
+
+ ResultSet rs = stmt.executeQuery("SELECT COUNT(1) FROM " + tableName);
+ rs.next();
+ Assert.assertEquals(tableName + " table should contain " + size + " rows", size,
+ rs.getLong(1));
+ }
+
+ /**
+ * Test master keys operation
+ */
+ @Test
+ public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
+ int id1 = objectStore.addMasterKey(KEY1);
+ int id2 = objectStore.addMasterKey(KEY2);
+
+ String[] keys = objectStore.getMasterKeys();
+ Assert.assertEquals(2, keys.length);
+ Assert.assertEquals(KEY1, keys[0]);
+ Assert.assertEquals(KEY2, keys[1]);
+
+ objectStore.updateMasterKey(id1, "new" + KEY1);
+ objectStore.updateMasterKey(id2, "new" + KEY2);
+ keys = objectStore.getMasterKeys();
+ Assert.assertEquals(2, keys.length);
+ Assert.assertEquals("new" + KEY1, keys[0]);
+ Assert.assertEquals("new" + KEY2, keys[1]);
+
+ objectStore.removeMasterKey(id1);
+ keys = objectStore.getMasterKeys();
+ Assert.assertEquals(1, keys.length);
+ Assert.assertEquals("new" + KEY2, keys[0]);
+
+ objectStore.removeMasterKey(id2);
+ }
+
+ /**
+ * Test role operation
+ */
+ @Test
+ public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
+ objectStore.addRole(ROLE1, OWNER);
+ objectStore.addRole(ROLE2, OWNER);
+ List<String> roles = objectStore.listRoleNames();
+ Assert.assertEquals(2, roles.size());
+ Assert.assertEquals(ROLE2, roles.get(1));
+ Role role1 = objectStore.getRole(ROLE1);
+ Assert.assertEquals(OWNER, role1.getOwnerName());
+ objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
+ objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
+ objectStore.removeRole(ROLE1);
+ }
+
+ @Test
+ public void testDirectSqlErrorMetrics() throws Exception {
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
+ Metrics.initialize(conf);
+ MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
+ "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
+ "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"
+ );
+
+ // Re-run setUp so that we get an object store with the metrics initialized
+ setUp();
+ Counter directSqlErrors =
+ Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
+
+ objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+ @Override
+ protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+ return null;
+ }
+
+ @Override
+ protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+ NoSuchObjectException {
+ return null;
+ }
+ }.run(false);
+
+ Assert.assertEquals(0, directSqlErrors.getCount());
+
+ objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+ @Override
+ protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+ throw new RuntimeException();
+ }
+
+ @Override
+ protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+ NoSuchObjectException {
+ return null;
+ }
+ }.run(false);
+
+ Assert.assertEquals(1, directSqlErrors.getCount());
+ }
+
+ private static void dropAllStoreObjects(RawStore store)
+ throws MetaException, InvalidObjectException, InvalidInputException {
+ try {
+ Deadline.registerIfNot(100000);
+ List<Function> functions = store.getAllFunctions(DEFAULT_CATALOG_NAME);
+ for (Function func : functions) {
+ store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName());
+ }
+ for (String catName : store.getCatalogs()) {
+ List<String> dbs = store.getAllDatabases(catName);
+ for (String db : dbs) {
+ List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+ for (String tbl : tbls) {
+ Deadline.startTimer("getPartition");
+ List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+ for (Partition part : parts) {
+ store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+ }
+ // Find any constraints and drop them
+ Set<String> constraints = new HashSet<>();
+ List<SQLPrimaryKey> pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl);
+ if (pk != null) {
+ for (SQLPrimaryKey pkcol : pk) {
+ constraints.add(pkcol.getPk_name());
+ }
+ }
+ List<SQLForeignKey> fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl);
+ if (fks != null) {
+ for (SQLForeignKey fkcol : fks) {
+ constraints.add(fkcol.getFk_name());
+ }
+ }
+ for (String constraint : constraints) {
+ store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint);
+ }
+ store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+ }
+ store.dropDatabase(catName, db);
+ }
+ store.dropCatalog(catName);
+ }
+ List<String> roles = store.listRoleNames();
+ for (String role : roles) {
+ store.removeRole(role);
+ }
+ } catch (NoSuchObjectException e) {
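+ // Ignored: the object is already gone, which is fine during cleanup.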
+ }
+ }
+
+ @Test
+ public void testQueryCloseOnError() throws Exception {
+ ObjectStore spy = Mockito.spy(objectStore);
+ spy.getAllDatabases(DEFAULT_CATALOG_NAME);
+ spy.getAllFunctions(DEFAULT_CATALOG_NAME);
+ spy.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+ spy.getPartitionCount();
+ Mockito.verify(spy, Mockito.times(3))
+ .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
+ }
+
+ @Test
+ public void testRetryingExecutorSleep() throws Exception {
+ RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null);
+ Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
+ }
+
+ @Ignore // See comment in ObjectStore.getDataSourceProps
+ @Test
+ public void testNonConfDatanucleusValueSet() {
+ String key = "datanucleus.no.such.key";
+ String value = "test_value";
+ String key1 = "blabla.no.such.key";
+ String value1 = "another_value";
+ Assume.assumeTrue(System.getProperty(key) == null);
+ Configuration localConf = MetastoreConf.newMetastoreConf();
+ MetaStoreTestUtils.setConfForStandloneMode(localConf);
+ localConf.set(key, value);
+ localConf.set(key1, value1);
+ objectStore = new ObjectStore();
+ objectStore.setConf(localConf);
+ Assert.assertEquals(value, objectStore.getProp().getProperty(key));
+ Assert.assertNull(objectStore.getProp().getProperty(key1));
+ }
+
+ /**
+ * Test notification operations
+ */
+ // TODO MS-SPLIT uncomment once we move EventMessage over
+ @Test
+ public void testNotificationOps() throws InterruptedException, MetaException {
+ final int NO_EVENT_ID = 0;
+ final int FIRST_EVENT_ID = 1;
+ final int SECOND_EVENT_ID = 2;
+
+ NotificationEvent event =
+ new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+ NotificationEventResponse eventResponse;
+ CurrentNotificationEventId eventId;
+
+ // Verify that there are no notifications available yet
+ eventId = objectStore.getCurrentNotificationEventId();
+ Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
+
+ // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+ objectStore.addNotificationEvent(event);
+ Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
+ objectStore.addNotificationEvent(event);
+ Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
+
+ // Verify that objectStore fetches the latest notification event ID
+ eventId = objectStore.getCurrentNotificationEventId();
+ Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+
+ // Verify that getNextNotification() returns all events
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+ Assert.assertEquals(2, eventResponse.getEventsSize());
+ Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+ Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+
+ // Verify that getNextNotification(last) returns events after a specified event
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+ Assert.assertEquals(1, eventResponse.getEventsSize());
+ Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+
+ // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+ Assert.assertEquals(0, eventResponse.getEventsSize());
+
+ // Verify that cleanNotificationEvents() cleans up all old notifications
+ Thread.sleep(1);
+ objectStore.cleanNotificationEvents(1);
+ eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+ Assert.assertEquals(0, eventResponse.getEventsSize());
+ }
+
+ @Ignore(
+ "This test is here to allow testing with other databases like mysql / postgres etc\n"
+ + " with user changes to the code. This cannot be run on apache derby because of\n"
+ + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+ )
+ @Test
+ public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException, MetaException {
+
+ final int NUM_THREADS = 10;
+ CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+ () -> LoggerFactory.getLogger("test")
+ .debug(NUM_THREADS + " threads going to add notification"));
+
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ /*
+ Below are the properties that need to be set based on what database this test is going to be run against.
+ */
+
+ // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+ // conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+ // "jdbc:mysql://localhost:3306/metastore_db");
+ // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+ // conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
+
+ /*
+ We have to add this one manually: for tests the db is initialized via the metastore directSQL
+ path, and we don't run the schema creation sql that includes an insert for notification_sequence,
+ which can be locked. The entry in notification_sequence happens via notification_event insertion.
+ */
+ objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+ objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
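+ // The filters above match nothing; the queries appear to be run only so that
+ // DataNucleus creates the tables backing MNotificationLog and
+ // MNotificationNextId before the concurrent inserts start.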
+
+ objectStore.addNotificationEvent(
+ new NotificationEvent(0, 0,
+ EventMessage.EventType.CREATE_DATABASE.toString(),
+ "CREATE DATABASE DB initial"));
+
+ ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+ for (int i = 0; i < NUM_THREADS; i++) {
+ final int n = i;
+
+ executorService.execute(
+ () -> {
+ ObjectStore store = new ObjectStore();
+ store.setConf(conf);
+
+ String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+ NotificationEvent dbEvent =
+ new NotificationEvent(0, 0, eventType,
+ "CREATE DATABASE DB" + n);
+ System.out.println("ADDING NOTIFICATION");
+
+ try {
+ cyclicBarrier.await();
+ store.addNotificationEvent(dbEvent);
+ } catch (InterruptedException | BrokenBarrierException | MetaException e) {
+ throw new RuntimeException(e);
+ }
+ System.out.println("FINISH NOTIFICATION");
+ });
+ }
+ executorService.shutdown();
+ Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+
+ // We have to set this up again because the underlying PMF keeps getting
+ // reinitialized and the original reference is closed.
+ ObjectStore store = new ObjectStore();
+ store.setConf(conf);
+
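+ // Expect one event per worker thread plus the seed event, with strictly
+ // contiguous IDs (no gaps and no duplicates).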
+ NotificationEventResponse eventResponse = store.getNextNotification(
+ new NotificationEventRequest());
+ Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+ long previousId = 0;
+ for (NotificationEvent event : eventResponse.getEvents()) {
+ Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+ previousId < event.getEventId());
+ Assert.assertEquals(previousId + 1, event.getEventId());
+ previousId = event.getEventId();
+ }
+ }
+
+ private void createTestCatalog(String catName) throws MetaException {
+ Catalog cat = new CatalogBuilder()
+ .setName(catName)
+ .setLocation("/tmp")
+ .build();
+ objectStore.createCatalog(cat);
+ }
+ }
+
http://git-wip-us.apache.org/repos/asf/hive/blob/651e7950/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index 0000000,717c5ee..01a8f81
mode 000000,100644..100644
--- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@@ -1,0 -1,233 +1,233 @@@
+ /*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ package org.apache.hadoop.hive.metastore;
+
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.List;
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;
+ import org.junit.After;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+
+ @Category(MetastoreUnitTest.class)
+ public class TestOldSchema {
+ private ObjectStore store = null;
+ private Configuration conf;
+
+ private static final Logger LOG = LoggerFactory.getLogger(TestOldSchema.class.getName());
+
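+ /**
+ * A no-op PartitionExpressionProxy: these tests never filter partitions by
+ * expression, so every method returns a null/false placeholder.
+ */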
+ public static class MockPartitionExpressionProxy implements PartitionExpressionProxy {
+ @Override
+ public String convertExprToFilter(byte[] expr) throws MetaException {
+ return null;
+ }
+
+ @Override
+ public boolean filterPartitionsByExpr(List<FieldSchema> partColumns, byte[] expr,
+ String defaultPartitionName,
+ List<String> partitionNames) throws MetaException {
+ return false;
+ }
+
+ @Override
+ public FileMetadataExprType getMetadataType(String inputFormat) {
+ return null;
+ }
+
+ @Override
+ public SearchArgument createSarg(byte[] expr) {
+ return null;
+ }
+
+ @Override
+ public FileFormatProxy getFileFormatProxy(FileMetadataExprType type) {
+ return null;
+ }
+ }
+
+ private byte[][] bitVectors = new byte[2][];
+
+ @Before
+ public void setUp() throws Exception {
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+ store = new ObjectStore();
+ store.setConf(conf);
+ dropAllStoreObjects(store);
+ HiveMetaStore.HMSHandler.createDefaultCatalog(store, new Warehouse(conf));
+
+ HyperLogLog hll = HyperLogLog.builder().build();
+ hll.addLong(1);
+ bitVectors[1] = hll.serialize();
+ hll = HyperLogLog.builder().build();
+ hll.addLong(2);
+ hll.addLong(3);
+ hll.addLong(3);
+ hll.addLong(4);
+ bitVectors[0] = hll.serialize();
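+ // bitVectors[1] encodes a single distinct value; bitVectors[0] encodes three
+ // (2, 3 and 4 -- the duplicate 3 does not change the HLL estimate).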
+ }
+
+ @After
+ public void tearDown() {
+ }
+
+ /**
+ * Tests partition operations
+ */
+ @Ignore("HIVE-19509: Disable tests that are failing continuously")
+ @Test
+ public void testPartitionOps() throws Exception {
+ String dbName = "default";
+ String tableName = "snp";
+ Database db1 = new DatabaseBuilder()
+ .setName(dbName)
+ .setDescription("description")
+ .setLocation("locationurl")
+ .build(conf);
+ store.createDatabase(db1);
+ long now = System.currentTimeMillis();
+ List<FieldSchema> cols = new ArrayList<>();
+ cols.add(new FieldSchema("col1", "long", "nocomment"));
+ SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
+ StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
+ serde, null, null, Collections.emptyMap());
+ List<FieldSchema> partCols = new ArrayList<>();
+ partCols.add(new FieldSchema("ds", "string", ""));
+ Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols,
+ Collections.emptyMap(), null, null, null);
+ store.createTable(table);
+
+ Deadline.startTimer("getPartition");
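+ // Some of the ObjectStore calls below check the Deadline timer; it was
+ // registered in dropAllStoreObjects(), called from setUp().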
+ for (int i = 0; i < 10; i++) {
+ List<String> partVal = new ArrayList<>();
+ partVal.add(String.valueOf(i));
+ StorageDescriptor psd = new StorageDescriptor(sd);
+ psd.setLocation("file:/tmp/default/hit/ds=" + i);
+ Partition part = new Partition(partVal, dbName, tableName, (int) now, (int) now, psd,
+ Collections.emptyMap());
+ part.setCatName(DEFAULT_CATALOG_NAME);
+ store.addPartition(part);
+ ColumnStatistics cs = new ColumnStatistics();
+ ColumnStatisticsDesc desc = new ColumnStatisticsDesc(false, dbName, tableName);
+ desc.setLastAnalyzed(now);
+ desc.setPartName("ds=" + String.valueOf(i));
+ cs.setStatsDesc(desc);
+ ColumnStatisticsObj obj = new ColumnStatisticsObj();
+ obj.setColName("col1");
+ obj.setColType("bigint");
+ ColumnStatisticsData data = new ColumnStatisticsData();
+ LongColumnStatsData dcsd = new LongColumnStatsData();
+ dcsd.setHighValue(1000 + i);
+ dcsd.setLowValue(-1000 - i);
+ dcsd.setNumNulls(i);
+ dcsd.setNumDVs(10 * i + 1);
+ dcsd.setBitVectors(bitVectors[0]);
+ data.setLongStats(dcsd);
+ obj.setStatsData(data);
+ cs.addToStatsObj(obj);
- store.updatePartitionColumnStatistics(cs, partVal);
++ store.updatePartitionColumnStatistics(cs, partVal, -1, null, -1);
+
+ }
+
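+ // Expected aggregates over partitions i = 0..9, checked below:
+ // highValue = max(1000 + i) = 1009, lowValue = min(-1000 - i) = -1009,
+ // numNulls = sum(i) = 45, numDVs = 91 = 10*9 + 1 (the per-partition maximum,
+ // since bit vectors were disabled in setUp()).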
+ Checker statChecker = new Checker() {
+ @Override
+ public void checkStats(AggrStats aggrStats) throws Exception {
+ Assert.assertEquals(10, aggrStats.getPartsFound());
+ Assert.assertEquals(1, aggrStats.getColStatsSize());
+ ColumnStatisticsObj cso = aggrStats.getColStats().get(0);
+ Assert.assertEquals("col1", cso.getColName());
+ Assert.assertEquals("bigint", cso.getColType());
+ LongColumnStatsData lcsd = cso.getStatsData().getLongStats();
+ Assert.assertEquals(1009, lcsd.getHighValue(), 0.01);
+ Assert.assertEquals(-1009, lcsd.getLowValue(), 0.01);
+ Assert.assertEquals(45, lcsd.getNumNulls());
+ Assert.assertEquals(91, lcsd.getNumDVs());
+ }
+ };
+ List<String> partNames = new ArrayList<>();
+ for (int i = 0; i < 10; i++) {
+ partNames.add("ds=" + i);
+ }
+ AggrStats aggrStats = store.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tableName, partNames,
+ Arrays.asList("col1"));
+ statChecker.checkStats(aggrStats);
+
+ }
+
+ private interface Checker {
+ void checkStats(AggrStats aggrStats) throws Exception;
+ }
+
+ private static void dropAllStoreObjects(RawStore store) throws MetaException,
+ InvalidObjectException, InvalidInputException {
+ try {
+ Deadline.registerIfNot(100000);
+ Deadline.startTimer("getPartition");
+ List<String> dbs = store.getAllDatabases(DEFAULT_CATALOG_NAME);
+ for (int i = 0; i < dbs.size(); i++) {
+ String db = dbs.get(i);
+ List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+ for (String tbl : tbls) {
+ List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+ for (Partition part : parts) {
+ store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+ }
+ store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+ }
+ store.dropDatabase(DEFAULT_CATALOG_NAME, db);
+ }
+ } catch (NoSuchObjectException e) {
+ }
+ }
+
+ }