You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by ga...@apache.org on 2018/01/16 21:36:30 UTC
[2/7] hive git commit: HIVE-17982 Move metastore specific itests.
This closes #279. (Alan Gates, reviewed by Peter Vary)
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
new file mode 100644
index 0000000..d4cedb0
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+
/**
 * Unit tests for {@link org.apache.hadoop.hive.metastore.HiveMetaStoreClient}.  For now this just
 * has transaction and locking tests.  The goal here is not to test all functionality possible
 * through the interface, as all permutations of DB operations should be tested in the appropriate
 * DB handler classes.  The goal is to test that we can properly pass the messages through the
 * thrift service.
 */
public class TestHiveMetaStoreTxns {

  private final Configuration conf = MetastoreConf.newMetastoreConf();
  private IMetaStoreClient client;

  /** Opens a batch of transactions, aborts/commits some, and checks validity reporting. */
  @Test
  public void testTxns() throws Exception {
    // setUp() preps a fresh txn DB for every test, so txn ids are handed out sequentially from 1.
    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
    Assert.assertEquals(1L, (long) tids.get(0));
    Assert.assertEquals(2L, (long) tids.get(1));
    Assert.assertEquals(3L, (long) tids.get(2));
    client.rollbackTxn(1);
    client.commitTxn(2);
    ValidTxnList validTxns = client.getValidTxns();
    Assert.assertFalse(validTxns.isTxnValid(1)); // aborted
    Assert.assertTrue(validTxns.isTxnValid(2));  // committed
    Assert.assertFalse(validTxns.isTxnValid(3)); // still open
    Assert.assertFalse(validTxns.isTxnValid(4)); // never allocated
  }

  /**
   * Same as testTxns but fetches the valid-txn list from the point of view of txn 3:
   * a transaction must see its own id as valid even though it is still open.
   */
  @Test
  public void testOpenTxnNotExcluded() throws Exception {
    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
    Assert.assertEquals(1L, (long) tids.get(0));
    Assert.assertEquals(2L, (long) tids.get(1));
    Assert.assertEquals(3L, (long) tids.get(2));
    client.rollbackTxn(1);
    client.commitTxn(2);
    ValidTxnList validTxns = client.getValidTxns(3);
    Assert.assertFalse(validTxns.isTxnValid(1));
    Assert.assertTrue(validTxns.isTxnValid(2));
    Assert.assertTrue(validTxns.isTxnValid(3)); // own open txn is not excluded
    Assert.assertFalse(validTxns.isTxnValid(4));
  }

  /** Exercises isTxnRangeValid over committed, aborted, open, and unallocated txn ids. */
  @Test
  public void testTxnRange() throws Exception {
    ValidTxnList validTxns = client.getValidTxns();
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
        validTxns.isTxnRangeValid(1L, 3L));
    List<Long> tids = client.openTxns("me", 5).getTxn_ids();

    HeartbeatTxnRangeResponse rsp = client.heartbeatTxnRange(1, 5);
    Assert.assertEquals(0, rsp.getNosuch().size());
    Assert.assertEquals(0, rsp.getAborted().size());

    // Leave: 1 aborted, 2-4 committed, 5 still open.
    client.rollbackTxn(1L);
    client.commitTxn(2L);
    client.commitTxn(3L);
    client.commitTxn(4L);
    validTxns = client.getValidTxns();
    System.out.println("validTxns = " + validTxns);
    // Ranges wholly inside the committed set -> ALL.
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(2L, 2L));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(2L, 3L));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(2L, 4L));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(3L, 4L));

    // Ranges mixing committed with aborted/open/unallocated ids -> SOME.
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(1L, 4L));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(2L, 5L));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(1L, 2L));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(4L, 5L));

    // Ranges with no committed ids at all -> NONE.
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
        validTxns.isTxnRangeValid(1L, 1L));
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
        validTxns.isTxnRangeValid(5L, 10L));

    // Hand-built list: high water mark 10, min open txn 5, exception txns {4, 5, 6}.
    // NOTE(review): assumes the string form is hwm:minOpenTxn:<exception lists> --
    // confirm against ValidReadTxnList's parser if this format ever changes.
    validTxns = new ValidReadTxnList("10:5:4,5,6:");
    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
        validTxns.isTxnRangeValid(4,6));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(7, 10));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(7, 11));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(3, 6));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(4, 7));
    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
        validTxns.isTxnRangeValid(1, 12));
    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
        validTxns.isTxnRangeValid(1, 3));
  }

  /** Acquires exclusive/semi-shared/shared locks outside any transaction, then unlocks. */
  @Test
  public void testLocks() throws Exception {
    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
    rqstBuilder.addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("mytable")
        .setPartitionName("mypartition")
        .setExclusive()
        .setOperationType(DataOperationType.NO_TXN)
        .build());
    rqstBuilder.addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("yourtable")
        .setSemiShared()
        .setOperationType(DataOperationType.NO_TXN)
        .build());
    rqstBuilder.addLockComponent(new LockComponentBuilder()
        .setDbName("yourdb")
        .setOperationType(DataOperationType.NO_TXN)
        .setShared()
        .build());
    rqstBuilder.setUser("fred");

    LockResponse res = client.lock(rqstBuilder.build());
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());

    res = client.checkLock(1);
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());

    // txn id 0 == no transaction; heartbeat keeps the lock alive.
    client.heartbeat(0, 1);

    client.unlock(1);
  }

  /** Acquires locks inside an open transaction; locks are released by the commit. */
  @Test
  public void testLocksWithTxn() throws Exception {
    long txnid = client.openTxn("me");

    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
    rqstBuilder.setTransactionId(txnid)
      .addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("mytable")
        .setPartitionName("mypartition")
        .setSemiShared()
        .setOperationType(DataOperationType.UPDATE)
        .build())
      .addLockComponent(new LockComponentBuilder()
        .setDbName("mydb")
        .setTableName("yourtable")
        .setSemiShared()
        .setOperationType(DataOperationType.UPDATE)
        .build())
      .addLockComponent(new LockComponentBuilder()
        .setDbName("yourdb")
        .setShared()
        .setOperationType(DataOperationType.SELECT)
        .build())
      .setUser("fred");

    LockResponse res = client.lock(rqstBuilder.build());
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());

    res = client.checkLock(1);
    Assert.assertEquals(1L, res.getLockid());
    Assert.assertEquals(LockState.ACQUIRED, res.getState());

    client.heartbeat(txnid, 1);

    client.commitTxn(txnid);
  }

  /** Round-trips ValidReadTxnList through its string serialization. */
  @Test
  public void stringifyValidTxns() throws Exception {
    // Test with just high water mark
    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
    String asString = validTxns.toString();
    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
    validTxns = new ValidReadTxnList(asString);
    Assert.assertEquals(1, validTxns.getHighWatermark());
    Assert.assertNotNull(validTxns.getInvalidTransactions());
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
    asString = validTxns.toString();
    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
    validTxns = new ValidReadTxnList(asString);
    Assert.assertEquals(1, validTxns.getHighWatermark());
    Assert.assertNotNull(validTxns.getInvalidTransactions());
    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);

    // Test with open transactions
    validTxns = new ValidReadTxnList("10:3:5:3");
    asString = validTxns.toString();
    // The relative order of the open/aborted ids in toString() is not pinned down,
    // so accept either serialization.
    if (!asString.equals("10:3:3:5") && !asString.equals("10:3:5:3")) {
      Assert.fail("Unexpected string value " + asString);
    }
    validTxns = new ValidReadTxnList(asString);
    Assert.assertEquals(10, validTxns.getHighWatermark());
    Assert.assertNotNull(validTxns.getInvalidTransactions());
    Assert.assertEquals(2, validTxns.getInvalidTransactions().length);
    boolean sawThree = false, sawFive = false;
    for (long tid : validTxns.getInvalidTransactions()) {
      if (tid == 3) sawThree = true;
      else if (tid == 5) sawFive = true;
      else Assert.fail("Unexpected value " + tid);
    }
    Assert.assertTrue(sawThree);
    Assert.assertTrue(sawFive);
  }

  @Before
  public void setUp() throws Exception {
    MetaStoreTestUtils.setConfForStandloneMode(conf);
    TxnDbUtil.setConfValues(conf);
    // Fresh txn metadata DB, so txn ids start from 1 in every test.
    TxnDbUtil.prepDb(conf);
    client = new HiveMetaStoreClient(conf);
  }

  @After
  public void tearDown() throws Exception {
    TxnDbUtil.cleanDb(conf);
  }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
new file mode 100644
index 0000000..fd75247
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * TestHiveMetaStoreWithEnvironmentContext. Test case for _with_environment_context
+ * calls in {@link org.apache.hadoop.hive.metastore.HiveMetaStore}
+ */
+public class TestHiveMetaStoreWithEnvironmentContext {
+
+ private Configuration conf;
+ private HiveMetaStoreClient msc;
+ private EnvironmentContext envContext;
+ private final Database db = new Database();
+ private Table table;
+ private Partition partition;
+
+ private static final String dbName = "hive3252";
+ private static final String tblName = "tmptbl";
+ private static final String renamed = "tmptbl2";
+
+ @Before
+ public void setUp() throws Exception {
+ System.setProperty("hive.metastore.event.listeners",
+ DummyListener.class.getName());
+
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+ msc = new HiveMetaStoreClient(conf);
+
+ msc.dropDatabase(dbName, true, true);
+
+ Map<String, String> envProperties = new HashMap<>();
+ envProperties.put("hadoop.job.ugi", "test_user");
+ envContext = new EnvironmentContext(envProperties);
+
+ db.setName(dbName);
+
+ table = new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tblName)
+ .addTableParam("a", "string")
+ .addPartCol("b", "string")
+ .addCol("a", "string")
+ .addCol("b", "string")
+ .build();
+
+
+ partition = new PartitionBuilder()
+ .fromTable(table)
+ .addValue("2011")
+ .build();
+
+ DummyListener.notifyList.clear();
+ }
+
+ @Test
+ public void testEnvironmentContext() throws Exception {
+ int listSize = 0;
+
+ List<ListenerEvent> notifyList = DummyListener.notifyList;
+ assertEquals(notifyList.size(), listSize);
+ msc.createDatabase(db);
+ listSize++;
+ assertEquals(listSize, notifyList.size());
+ CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+ assert dbEvent.getStatus();
+
+ msc.createTable(table, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+ assert tblEvent.getStatus();
+ assertEquals(envContext, tblEvent.getEnvironmentContext());
+
+ table = msc.getTable(dbName, tblName);
+
+ partition.getSd().setLocation(table.getSd().getLocation() + "/part1");
+ msc.add_partition(partition, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ assert partEvent.getStatus();
+ assertEquals(envContext, partEvent.getEnvironmentContext());
+
+ List<String> partVals = new ArrayList<>();
+ partVals.add("2012");
+ msc.appendPartition(dbName, tblName, partVals, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ assert appendPartEvent.getStatus();
+ assertEquals(envContext, appendPartEvent.getEnvironmentContext());
+
+ table.setTableName(renamed);
+ msc.alter_table_with_environmentContext(dbName, tblName, table, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1);
+ assert alterTableEvent.getStatus();
+ assertEquals(envContext, alterTableEvent.getEnvironmentContext());
+
+ table.setTableName(tblName);
+ msc.alter_table_with_environmentContext(dbName, renamed, table, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ List<String> dropPartVals = new ArrayList<>();
+ dropPartVals.add("2011");
+ msc.dropPartition(dbName, tblName, dropPartVals, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ DropPartitionEvent dropPartEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
+ assert dropPartEvent.getStatus();
+ assertEquals(envContext, dropPartEvent.getEnvironmentContext());
+
+ msc.dropPartition(dbName, tblName, "b=2012", true, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ DropPartitionEvent dropPartByNameEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
+ assert dropPartByNameEvent.getStatus();
+ assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext());
+
+ msc.dropTable(dbName, tblName, true, false, envContext);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1);
+ assert dropTblEvent.getStatus();
+ assertEquals(envContext, dropTblEvent.getEnvironmentContext());
+
+ msc.dropDatabase(dbName);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+ assert dropDB.getStatus();
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
new file mode 100644
index 0000000..6854a93
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.thrift.TException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestMarkPartition {
+
+ protected Configuration conf;
+
+ @Before
+ public void setUp() throws Exception {
+
+ System.setProperty("hive.metastore.event.clean.freq", "1s");
+ System.setProperty("hive.metastore.event.expiry.duration", "2s");
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+ }
+
+ @Test
+ public void testMarkingPartitionSet() throws TException, InterruptedException {
+ HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
+
+ final String dbName = "hive2215";
+ msc.dropDatabase(dbName, true, true, true);
+ Database db = new DatabaseBuilder()
+ .setName(dbName)
+ .build();
+ msc.createDatabase(db);
+
+ final String tableName = "tmptbl";
+ msc.dropTable(dbName, tableName, true, true);
+ Table table = new TableBuilder()
+ .setDbName(dbName)
+ .setTableName(tableName)
+ .addCol("a", "string")
+ .addPartCol("b", "string")
+ .build();
+ msc.createTable(table);
+
+ Partition part = new PartitionBuilder()
+ .fromTable(table)
+ .addValue("2011")
+ .build();
+ msc.add_partition(part);
+ Map<String,String> kvs = new HashMap<>();
+ kvs.put("b", "'2011'");
+ msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
+ Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+ Thread.sleep(3000);
+ Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+
+ kvs.put("b", "'2012'");
+ Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE));
+ try {
+ msc.markPartitionForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
+ Assert.fail("Expected UnknownTableException");
+ } catch (UnknownTableException e) {
+ // All good
+ } catch(Exception e){
+ Assert.fail("Expected UnknownTableException");
+ }
+ try{
+ msc.isPartitionMarkedForEvent(dbName, "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
+ Assert.fail("Expected UnknownTableException");
+ } catch (UnknownTableException e) {
+ // All good
+ } catch(Exception e){
+ Assert.fail("Expected UnknownTableException, received " + e.getClass().getName());
+ }
+ kvs.put("a", "'2012'");
+ try {
+ msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE);
+ Assert.fail("Expected InvalidPartitionException");
+ } catch (InvalidPartitionException e) {
+ // All good
+ } catch(Exception e){
+ Assert.fail("Expected InvalidPartitionException, received " + e.getClass().getName());
+ }
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
new file mode 100644
index 0000000..ac1cc4c
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+
/**
 * Runs {@link TestMarkPartition} against a remote (thrift) metastore instead of an embedded one.
 * JUnit 4 executes superclass {@code @Before} methods first, so {@code conf} is already
 * initialized by {@code TestMarkPartition.setUp()} when {@code startServer()} runs.
 */
public class TestMarkPartitionRemote extends TestMarkPartition {

  @Before
  public void startServer() throws Exception {
    // NOTE(review): findFreePort() followed by a separate bind inside startMetaStore() is
    // inherently racy -- another process could grab the port in between.  Tolerated in tests.
    int port = MetaStoreTestUtils.findFreePort();
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
    MetaStoreTestUtils.setConfForStandloneMode(conf);
    MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
  }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
new file mode 100644
index 0000000..25e3a95
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * TestMetaStoreEventListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}
+ */
+public class TestMetaStoreEndFunctionListener {
+ private Configuration conf;
+ private HiveMetaStoreClient msc;
+
+ @Before
+ public void setUp() throws Exception {
+ System.setProperty("hive.metastore.event.listeners",
+ DummyListener.class.getName());
+ System.setProperty("hive.metastore.pre.event.listeners",
+ DummyPreListener.class.getName());
+ System.setProperty("hive.metastore.end.function.listeners",
+ DummyEndFunctionListener.class.getName());
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+ msc = new HiveMetaStoreClient(conf);
+ }
+
+ @Test
+ public void testEndFunctionListener() throws Exception {
+ /* Objective here is to ensure that when exceptions are thrown in HiveMetaStore in API methods
+ * they bubble up and are stored in the MetaStoreEndFunctionContext objects
+ */
+ String dbName = "hive3524";
+ String tblName = "tmptbl";
+ int listSize;
+
+ Database db = new DatabaseBuilder()
+ .setName(dbName)
+ .build();
+ msc.createDatabase(db);
+
+ try {
+ msc.getDatabase("UnknownDB");
+ } catch (Exception e) {
+ // All good
+ }
+ listSize = DummyEndFunctionListener.funcNameList.size();
+ String func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+ MetaStoreEndFunctionContext context = DummyEndFunctionListener.contextList.get(listSize-1);
+ assertEquals(func_name,"get_database");
+ assertFalse(context.isSuccess());
+ Exception e = context.getException();
+ assertTrue((e!=null));
+ assertTrue((e instanceof NoSuchObjectException));
+ assertEquals(context.getInputTableName(), null);
+
+ String unknownTable = "UnknownTable";
+ Table table = new TableBuilder()
+ .setDbName(db)
+ .setTableName(tblName)
+ .addCol("a", "string")
+ .addPartCol("b", "string")
+ .build();
+ msc.createTable(table);
+ try {
+ msc.getTable(dbName, unknownTable);
+ } catch (Exception e1) {
+ // All good
+ }
+ listSize = DummyEndFunctionListener.funcNameList.size();
+ func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+ context = DummyEndFunctionListener.contextList.get(listSize-1);
+ assertEquals(func_name,"get_table");
+ assertFalse(context.isSuccess());
+ e = context.getException();
+ assertTrue((e!=null));
+ assertTrue((e instanceof NoSuchObjectException));
+ assertEquals(context.getInputTableName(), unknownTable);
+
+ try {
+ msc.getPartition("hive3524", tblName, "b=2012");
+ } catch (Exception e2) {
+ // All good
+ }
+ listSize = DummyEndFunctionListener.funcNameList.size();
+ func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+ context = DummyEndFunctionListener.contextList.get(listSize-1);
+ assertEquals(func_name,"get_partition_by_name");
+ assertFalse(context.isSuccess());
+ e = context.getException();
+ assertTrue((e!=null));
+ assertTrue((e instanceof NoSuchObjectException));
+ assertEquals(context.getInputTableName(), tblName);
+ try {
+ msc.dropTable(dbName, unknownTable);
+ } catch (Exception e4) {
+ // All good
+ }
+ listSize = DummyEndFunctionListener.funcNameList.size();
+ func_name = DummyEndFunctionListener.funcNameList.get(listSize-1);
+ context = DummyEndFunctionListener.contextList.get(listSize-1);
+ assertEquals(func_name,"get_table");
+ assertFalse(context.isSuccess());
+ e = context.getException();
+ assertTrue((e!=null));
+ assertTrue((e instanceof NoSuchObjectException));
+ assertEquals(context.getInputTableName(), "UnknownTable");
+
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
new file mode 100644
index 0000000..1508ee5
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
@@ -0,0 +1,557 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.Lists;
+
+import junit.framework.TestCase;
+
+/**
+ * TestMetaStoreEventListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
+ * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
+ */
+public class TestMetaStoreEventListener {
+ private Configuration conf;
+ private HiveMetaStoreClient msc;
+
+ private static final String dbName = "hive2038";
+ private static final String tblName = "tmptbl";
+ private static final String renamed = "tmptbl2";
+ private static final String metaConfKey = "metastore.partition.name.whitelist.pattern";
+ private static final String metaConfVal = "";
+
+ @Before
+ public void setUp() throws Exception {
+ // Listener implementations must be registered through system properties
+ // BEFORE the metastore server starts; the server reads them at startup.
+ System.setProperty("hive.metastore.event.listeners",
+ DummyListener.class.getName());
+ System.setProperty("hive.metastore.pre.event.listeners",
+ DummyPreListener.class.getName());
+
+ conf = MetastoreConf.newMetastoreConf();
+
+ // Empty whitelist pattern accepts any partition name; the testMetaConf*
+ // cases below flip this key and watch for ConfigChangeEvents.
+ MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+
+ msc = new HiveMetaStoreClient(conf);
+
+ // Clean slate: drop leftovers from earlier runs (ignore-if-absent, cascade),
+ // then reset both listeners' captured-event lists so counts start at zero.
+ msc.dropDatabase(dbName, true, true, true);
+ DummyListener.notifyList.clear();
+ DummyPreListener.notifyList.clear();
+ }
+
+ /** Asserts the observed database matches the expected one on name and location URI. */
+ private void validateCreateDb(Database expected, Database observed) {
+   assertEquals(expected.getName(), observed.getName());
+   assertEquals(expected.getLocationUri(), observed.getLocationUri());
+ }
+
+ /** Asserts two tables agree on table name, database name, and storage location. */
+ private void validateTable(Table expected, Table observed) {
+   assertEquals(expected.getTableName(), observed.getTableName());
+   assertEquals(expected.getDbName(), observed.getDbName());
+   assertEquals(expected.getSd().getLocation(), observed.getSd().getLocation());
+ }
+
+ /** Create-table events carry the new table; compare its identifying fields. */
+ private void validateCreateTable(Table expected, Table observed) {
+   validateTable(expected, observed);
+ }
+
+ /** Added partitions must equal the partition fetched back from the store. */
+ private void validateAddPartition(Partition expected, Partition observed) {
+   assertEquals(expected, observed);
+ }
+
+ /** The table embedded in an add-partition event must equal the created table. */
+ private void validateTableInAddPartition(Table expected, Table observed) {
+   assertEquals(expected, observed);
+ }
+
+ /** Asserts two partitions agree on values, database, and table name. */
+ private void validatePartition(Partition expected, Partition observed) {
+   assertEquals(expected.getValues(), observed.getValues());
+   assertEquals(expected.getDbName(), observed.getDbName());
+   assertEquals(expected.getTableName(), observed.getTableName());
+ }
+
+ /**
+  * Checks an alter-partition event: the old partition is identified by its
+  * (values, db, table) triple, the new one is compared field by field.
+  */
+ private void validateAlterPartition(Partition oldExpected,
+     Partition newExpected, String oldDbName,
+     String oldTblName, List<String> oldValues,
+     Partition newObserved) {
+   assertEquals(oldExpected.getValues(), oldValues);
+   assertEquals(oldExpected.getDbName(), oldDbName);
+   assertEquals(oldExpected.getTableName(), oldTblName);
+   validatePartition(newExpected, newObserved);
+ }
+
+ /** Checks both the before- and after-image of an alter-table event. */
+ private void validateAlterTable(Table oldExpected, Table newExpected,
+     Table oldObserved, Table newObserved) {
+   validateTable(oldExpected, oldObserved);
+   validateTable(newExpected, newObserved);
+ }
+
+ /** Like {@link #validateAlterTable} but additionally compares the column lists. */
+ private void validateAlterTableColumns(Table oldExpected, Table newExpected,
+     Table oldObserved, Table newObserved) {
+   validateAlterTable(oldExpected, newExpected, oldObserved, newObserved);
+   assertEquals(oldExpected.getSd().getCols(), oldObserved.getSd().getCols());
+   assertEquals(newExpected.getSd().getCols(), newObserved.getSd().getCols());
+ }
+
+ /** Checks a load-partition-done event's table name and partition spec map. */
+ private void validateLoadPartitionDone(String expectedTableName,
+     Map<String, String> expectedSpec, String observedTableName,
+     Map<String, String> observedSpec) {
+   assertEquals(expectedSpec, observedSpec);
+   assertEquals(expectedTableName, observedTableName);
+ }
+
+ /** Asserts both iterators yield matching partitions and are exhausted together. */
+ private void validateDropPartition(Iterator<Partition> expected, Iterator<Partition> observed) {
+   while (expected.hasNext()) {
+     assertTrue(observed.hasNext());
+     validatePartition(expected.next(), observed.next());
+   }
+   assertFalse(observed.hasNext());
+ }
+
+ /** The table embedded in a drop-partition event must match the owning table. */
+ private void validateTableInDropPartition(Table expected, Table observed) {
+   validateTable(expected, observed);
+ }
+
+ /** Drop-table events carry the dropped table; compare its identifying fields. */
+ private void validateDropTable(Table expected, Table observed) {
+   validateTable(expected, observed);
+ }
+
+ /** Drop-database events carry the whole Database object; require full equality. */
+ private void validateDropDb(Database expected, Database observed) {
+   assertEquals(expected, observed);
+ }
+
+ /** Asserts two indexes agree on all identifying fields and on storage location. */
+ private void validateIndex(Index expected, Index observed) {
+   assertEquals(expected.getDbName(), observed.getDbName());
+   assertEquals(expected.getIndexName(), observed.getIndexName());
+   assertEquals(expected.getIndexHandlerClass(), observed.getIndexHandlerClass());
+   assertEquals(expected.getOrigTableName(), observed.getOrigTableName());
+   assertEquals(expected.getIndexTableName(), observed.getIndexTableName());
+   assertEquals(expected.getSd().getLocation(), observed.getSd().getLocation());
+ }
+
+ /** Add-index events carry the newly created index. */
+ private void validateAddIndex(Index expected, Index observed) {
+   validateIndex(expected, observed);
+ }
+
+ /** Checks both the before- and after-image of an alter-index event. */
+ private void validateAlterIndex(Index oldExpected, Index oldObserved,
+     Index newExpected, Index newObserved) {
+   validateIndex(oldExpected, oldObserved);
+   validateIndex(newExpected, newObserved);
+ }
+
+ /** Drop-index events carry the dropped index. */
+ private void validateDropIndex(Index expected, Index observed) {
+   validateIndex(expected, observed);
+ }
+
+ /**
+  * End-to-end walk through the metastore listener API: performs each DDL
+  * operation once and verifies that both the pre-event and post-event
+  * listeners observed it with the right payload and success status.
+  * NOTE(review): assertions are strictly order-dependent — every operation
+  * bumps listSize, and checks index the notify lists from the tail.
+  */
+ @Test
+ public void testListener() throws Exception {
+ int listSize = 0;
+
+ List<ListenerEvent> notifyList = DummyListener.notifyList;
+ List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
+ assertEquals(notifyList.size(), listSize);
+ assertEquals(preNotifyList.size(), listSize);
+
+ // --- create database ---
+ Database db = new DatabaseBuilder()
+ .setName(dbName)
+ .build();
+ msc.createDatabase(db);
+ listSize++;
+ PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ db = msc.getDatabase(dbName);
+ assertEquals(listSize, notifyList.size());
+ // Pre-list is one longer than post-list here — presumably the getDatabase
+ // read above fires a pre-event but no post-event. TODO confirm against
+ // the server's pre-listener wiring.
+ assertEquals(listSize + 1, preNotifyList.size());
+ validateCreateDb(db, preDbEvent.getDatabase());
+
+ CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
+ Assert.assertTrue(dbEvent.getStatus());
+ validateCreateDb(db, dbEvent.getDatabase());
+
+ // --- create table ---
+ Table table = new TableBuilder()
+ .setDbName(db)
+ .setTableName(tblName)
+ .addCol("a", "string")
+ .addPartCol("b", "string")
+ .build();
+ msc.createTable(table);
+ PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ listSize++;
+ Table tbl = msc.getTable(dbName, tblName);
+ validateCreateTable(tbl, preTblEvent.getTable());
+ assertEquals(notifyList.size(), listSize);
+
+ CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
+ Assert.assertTrue(tblEvent.getStatus());
+ validateCreateTable(tbl, tblEvent.getTable());
+
+ // --- create / alter / drop index ---
+ String indexName = "tmptbl_i";
+ Index index = new IndexBuilder()
+ .setDbAndTableName(table)
+ .setIndexName(indexName)
+ .addCol("a", "string")
+ .setDeferredRebuild(true)
+ .addIndexParam("prop1", "val1")
+ .addIndexParam("prop2", "val2")
+ .build();
+ Table indexTable = new TableBuilder()
+ .fromIndex(index)
+ .build();
+ msc.createIndex(index, indexTable);
+ listSize += 2; // creates index table internally
+ assertEquals(notifyList.size(), listSize);
+
+ AddIndexEvent addIndexEvent = (AddIndexEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(addIndexEvent.getStatus());
+ // size - 2: the internal index-table creation fired its own pre-event after ours.
+ PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 2));
+
+ Index oldIndex = msc.getIndex(dbName, tblName, indexName);
+
+ validateAddIndex(oldIndex, addIndexEvent.getIndex());
+
+ validateAddIndex(oldIndex, preAddIndexEvent.getIndex());
+
+ Index alteredIndex = new Index(oldIndex);
+ alteredIndex.getParameters().put("prop3", "val3");
+ msc.alter_index(dbName, tblName, indexName, alteredIndex);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ Index newIndex = msc.getIndex(dbName, tblName, indexName);
+
+ AlterIndexEvent alterIndexEvent = (AlterIndexEvent) notifyList.get(listSize - 1);
+ Assert.assertTrue(alterIndexEvent.getStatus());
+ validateAlterIndex(oldIndex, alterIndexEvent.getOldIndex(),
+ newIndex, alterIndexEvent.getNewIndex());
+
+ PreAlterIndexEvent preAlterIndexEvent = (PreAlterIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
+ validateAlterIndex(oldIndex, preAlterIndexEvent.getOldIndex(),
+ newIndex, preAlterIndexEvent.getNewIndex());
+
+ msc.dropIndex(dbName, tblName, indexName, true);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ DropIndexEvent dropIndexEvent = (DropIndexEvent) notifyList.get(listSize - 1);
+ Assert.assertTrue(dropIndexEvent.getStatus());
+ validateDropIndex(newIndex, dropIndexEvent.getIndex());
+
+ PreDropIndexEvent preDropIndexEvent = (PreDropIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
+ validateDropIndex(newIndex, preDropIndexEvent.getIndex());
+
+ // --- add a single partition ---
+ Partition part = new PartitionBuilder()
+ .fromTable(table)
+ .addValue("2011")
+ .build();
+ msc.add_partition(part);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+
+ AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ Assert.assertTrue(partEvent.getStatus());
+ part = msc.getPartition("hive2038", "tmptbl", "b=2011");
+ Partition partAdded = partEvent.getPartitionIterator().next();
+ validateAddPartition(part, partAdded);
+ validateTableInAddPartition(tbl, partEvent.getTable());
+ validateAddPartition(part, prePartEvent.getPartitions().get(0));
+
+ // Test adding multiple partitions in a single partition-set, atomically.
+ int currentTime = (int)System.currentTimeMillis();
+ HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(conf);
+ table = hmsClient.getTable(dbName, "tmptbl");
+ Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
+ currentTime, table.getSd(), table.getParameters());
+ hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
+ // One event for the whole batch, not one per partition.
+ ++listSize;
+ AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
+ assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
+ List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
+ assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
+ assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
+ assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
+ assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
+
+ // --- alter partition (touch last-access time only) ---
+ part.setLastAccessTime((int)(System.currentTimeMillis()/1000));
+ msc.alter_partition(dbName, tblName, part);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAlterPartitionEvent preAlterPartEvent =
+ (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ //the partition did not change,
+ // so the new partition should be similar to the original partition
+ Partition origP = msc.getPartition(dbName, tblName, "b=2011");
+
+ AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(alterPartEvent.getStatus());
+ validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
+ alterPartEvent.getOldPartition().getTableName(),
+ alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
+
+
+ validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
+ preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
+ preAlterPartEvent.getNewPartition());
+
+ // --- append partition (fires an add-partition event) ---
+ List<String> part_vals = new ArrayList<>();
+ part_vals.add("c=2012");
+ int preEventListSize;
+ preEventListSize = preNotifyList.size() + 1;
+ Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
+
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ assertEquals(preNotifyList.size(), preEventListSize);
+
+ AddPartitionEvent appendPartEvent =
+ (AddPartitionEvent)(notifyList.get(listSize-1));
+ Partition partAppended = appendPartEvent.getPartitionIterator().next();
+ validateAddPartition(newPart, partAppended);
+
+ PreAddPartitionEvent preAppendPartEvent =
+ (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
+ validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
+
+ // --- rename table and back ---
+ Table renamedTable = new Table(table);
+ renamedTable.setTableName(renamed);
+ msc.alter_table(dbName, tblName, renamedTable);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+ renamedTable = msc.getTable(dbName, renamed);
+
+ AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+ Assert.assertTrue(alterTableE.getStatus());
+ validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+ validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
+ preAlterTableE.getNewTable());
+
+ //change the table name back
+ table = new Table(renamedTable);
+ table.setTableName(tblName);
+ msc.alter_table(dbName, renamed, table);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ // --- alter table: add a column ---
+ table = msc.getTable(dbName, tblName);
+ table.getSd().addToCols(new FieldSchema("c", "int", ""));
+ msc.alter_table(dbName, tblName, table);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
+
+ Table altTable = msc.getTable(dbName, tblName);
+
+ alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
+ Assert.assertTrue(alterTableE.getStatus());
+ validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
+ validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
+ preAlterTableE.getNewTable());
+
+ // --- mark partition load-done ---
+ Map<String,String> kvs = new HashMap<>(1);
+ kvs.put("b", "2011");
+ msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+
+ LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(partMarkEvent.getStatus());
+ validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
+ partMarkEvent.getPartitionName());
+
+ PreLoadPartitionDoneEvent prePartMarkEvent =
+ (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
+ validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
+ prePartMarkEvent.getPartitionName());
+
+ // --- drop partition / table / database ---
+ msc.dropPartition(dbName, tblName, Collections.singletonList("2011"));
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
+ .size() - 1);
+
+ DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
+ Assert.assertTrue(dropPart.getStatus());
+ validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
+ validateTableInDropPartition(tbl, dropPart.getTable());
+
+ validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
+ validateTableInDropPartition(tbl, preDropPart.getTable());
+
+ msc.dropTable(dbName, tblName);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
+ Assert.assertTrue(dropTbl.getStatus());
+ validateDropTable(tbl, dropTbl.getTable());
+ validateDropTable(tbl, preDropTbl.getTable());
+
+ msc.dropDatabase(dbName);
+ listSize++;
+ assertEquals(notifyList.size(), listSize);
+ PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
+
+ DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
+ Assert.assertTrue(dropDB.getStatus());
+ validateDropDb(db, dropDB.getDatabase());
+ validateDropDb(db, preDropDB.getDatabase());
+
+ // --- config change event; "true" is presumably the key's default. TODO confirm.
+ msc.setMetaConf("metastore.try.direct.sql", "false");
+ ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
+ assertEquals("metastore.try.direct.sql", event.getKey());
+ assertEquals("true", event.getOldValue());
+ assertEquals("false", event.getNewValue());
+ }
+
+ /**
+  * Verifies that setting a meta conf fires a ConfigChangeEvent, and that
+  * closing the client restores the original value — again notifying listeners.
+  */
+ @Test
+ public void testMetaConfNotifyListenersClosingClient() throws Exception {
+   HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+   closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+   ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   // JUnit convention: expected value first, so failure messages read correctly.
+   assertEquals(metaConfVal, event.getOldValue());
+   assertEquals("[test pattern modified]", event.getNewValue());
+   closingClient.close();
+
+   // Server-side meta-conf cleanup runs asynchronously after close; give it
+   // time to fire its event before asserting.
+   Thread.sleep(2 * 1000);
+
+   event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   assertEquals("[test pattern modified]", event.getOldValue());
+   assertEquals(metaConfVal, event.getNewValue());
+ }
+
+ /**
+  * Same as {@code testMetaConfNotifyListenersClosingClient}, but the cleanup is
+  * triggered by closing the underlying transport rather than the client itself.
+  */
+ @Test
+ public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+   HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+   nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+   ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   // JUnit convention: expected value first, so failure messages read correctly.
+   assertEquals(metaConfVal, event.getOldValue());
+   assertEquals("[test pattern modified]", event.getNewValue());
+   // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+   nonClosingClient.getTTransport().close();
+
+   // Cleanup runs asynchronously on the server; give it time to fire.
+   Thread.sleep(2 * 1000);
+
+   event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   assertEquals("[test pattern modified]", event.getOldValue());
+   assertEquals(metaConfVal, event.getNewValue());
+ }
+
+ /**
+  * Setting a meta conf key to the value it already holds must not emit a
+  * ConfigChangeEvent when the client closes and cleanup runs.
+  */
+ @Test
+ public void testMetaConfDuplicateNotification() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ // Set the key to its current value, so close-time restoration is a no-op.
+ closingClient.setMetaConf(metaConfKey, metaConfVal);
+ int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+ closingClient.close();
+
+ // Allow the asynchronous server-side cleanup to run before counting events.
+ Thread.sleep(2 * 1000);
+
+ int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+ // Setting key to same value, should not trigger configChange event during shutdown
+ assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+ }
+
+ /**
+  * Verifies that the meta-conf cleanup on client close fires an extra event,
+  * and that both events originate from the same IHMSHandler instance.
+  */
+ @Test
+ public void testMetaConfSameHandler() throws Exception {
+   HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+   closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+   ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+   IHMSHandler beforeHandler = event.getIHMSHandler();
+   closingClient.close();
+
+   // Allow the asynchronous meta-conf cleanup to fire its event.
+   Thread.sleep(2 * 1000);
+   event = (ConfigChangeEvent) DummyListener.getLastEvent();
+   int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+   IHMSHandler afterHandler = event.getIHMSHandler();
+   // Meta-conf cleanup should trigger an additional event. Compare the int
+   // values directly: assertNotSame checks reference identity on the
+   // autoboxed Integers, which only "works" inside the Integer cache
+   // (-128..127) and would silently pass for equal counts above 127.
+   assertTrue(beforeCloseNotificationEventCounts != afterCloseNotificationEventCounts);
+   // Both the handlers should be same
+   assertEquals(beforeHandler, afterHandler);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
new file mode 100644
index 0000000..de729c7
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import junit.framework.TestCase;
+
+/**
+ * Ensure that the status of MetaStore events depend on the RawStore's commit status.
+ */
+ public class TestMetaStoreEventListenerOnlyOnCommit {
+
+   private Configuration conf;
+   private HiveMetaStoreClient msc;
+
+   @Before
+   public void setUp() throws Exception {
+     // Commits must succeed while the fixtures below are created.
+     DummyRawStoreControlledCommit.setCommitSucceed(true);
+
+     // Server-side plugins (listener + controllable RawStore) have to be
+     // registered via system properties before the metastore server starts.
+     System.setProperty(ConfVars.EVENT_LISTENERS.toString(), DummyListener.class.getName());
+     System.setProperty(ConfVars.RAW_STORE_IMPL.toString(),
+         DummyRawStoreControlledCommit.class.getName());
+
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+     MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+     msc = new HiveMetaStoreClient(conf);
+
+     DummyListener.notifyList.clear();
+   }
+
+   /**
+    * Verifies that the status flag on listener events mirrors the RawStore's
+    * commit result: successful commits yield status=true events, a failed
+    * commit still notifies the listener but with status=false.
+    */
+   @Test
+   public void testEventStatus() throws Exception {
+     int listSize = 0;
+     List<ListenerEvent> notifyList = DummyListener.notifyList;
+     // JUnit convention: expected value first, actual second.
+     assertEquals(listSize, notifyList.size());
+
+     String dbName = "tmpDb";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .build();
+     msc.createDatabase(db);
+
+     listSize += 1;
+     notifyList = DummyListener.notifyList;
+     assertEquals(listSize, notifyList.size());
+     assertTrue(DummyListener.getLastEvent().getStatus());
+
+     String tableName = "unittest_TestMetaStoreEventListenerOnlyOnCommit";
+     Table table = new TableBuilder()
+         .setDbName(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addPartCol("ds", "string")
+         .build();
+     msc.createTable(table);
+     listSize += 1;
+     notifyList = DummyListener.notifyList;
+     assertEquals(listSize, notifyList.size());
+     assertTrue(DummyListener.getLastEvent().getStatus());
+
+     Partition part = new PartitionBuilder()
+         .fromTable(table)
+         .addValue("foo1")
+         .build();
+     msc.add_partition(part);
+     listSize += 1;
+     notifyList = DummyListener.notifyList;
+     assertEquals(listSize, notifyList.size());
+     assertTrue(DummyListener.getLastEvent().getStatus());
+
+     // From here on the RawStore refuses to commit; the listener must still
+     // be notified, but the event must carry a failed status.
+     DummyRawStoreControlledCommit.setCommitSucceed(false);
+
+     part = new PartitionBuilder()
+         .fromTable(table)
+         .addValue("foo2")
+         .build();
+     msc.add_partition(part);
+     listSize += 1;
+     notifyList = DummyListener.notifyList;
+     assertEquals(listSize, notifyList.size());
+     assertFalse(DummyListener.getLastEvent().getStatus());
+   }
+ }
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
new file mode 100644
index 0000000..82e39f1
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerWithOldConf.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.collect.Lists;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.Index;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.IndexBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.events.ListenerEvent;
+import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+import org.apache.hadoop.hive.metastore.events.PreEventContext;
+import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Mostly same tests as TestMetaStoreEventListener, but using old hive conf values instead of new
+ * metastore conf values.
+ */
+public class TestMetaStoreEventListenerWithOldConf {
+ // Conf handed to each HiveMetaStoreClient; points at the embedded metastore started in setUp().
+ private Configuration conf;
+
+ // Old-style (hive.*) key for the partition-name whitelist pattern; tests toggle this
+ // meta-conf value and observe the ConfigChangeEvents it produces.
+ private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern";
+ private static final String metaConfVal = "";
+
+ @Before
+ public void setUp() throws Exception {
+ // Register the dummy listeners through the OLD hive.* system properties (rather than
+ // MetastoreConf vars) — exercising the legacy configuration path is the point of this class.
+ System.setProperty("hive.metastore.event.listeners",
+ DummyListener.class.getName());
+ System.setProperty("hive.metastore.pre.event.listeners",
+ DummyPreListener.class.getName());
+
+ int port = MetaStoreTestUtils.findFreePort();
+ conf = MetastoreConf.newMetastoreConf();
+
+ MetastoreConf.setVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ MetaStoreTestUtils.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+
+ // Start each test from a clean event history.
+ DummyListener.notifyList.clear();
+ DummyPreListener.notifyList.clear();
+ }
+
+ /**
+ * setMetaConf must fire a ConfigChangeEvent, and an explicit client close() must fire a
+ * second event restoring the default value.
+ */
+ @Test
+ public void testMetaConfNotifyListenersClosingClient() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), metaConfVal);
+ assertEquals(event.getNewValue(), "[test pattern modified]");
+ closingClient.close();
+
+ // NOTE(review): fixed sleep gives the server time to run its connection-cleanup path;
+ // inherently racy — a polling wait would be more robust.
+ Thread.sleep(2 * 1000);
+
+ // Cleanup reverts the meta-conf to its default, emitting the mirror-image event.
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), "[test pattern modified]");
+ assertEquals(event.getNewValue(), metaConfVal);
+ }
+
+ /**
+ * Same as the closing-client test, but the revert is triggered by dropping the transport
+ * (server-side deleteContext) instead of a clean client close().
+ */
+ @Test
+ public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
+ HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(conf, null);
+ nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), metaConfVal);
+ assertEquals(event.getNewValue(), "[test pattern modified]");
+ // This should also trigger meta listener notification via TServerEventHandler#deleteContext
+ nonClosingClient.getTTransport().close();
+
+ Thread.sleep(2 * 1000);
+
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ assertEquals(event.getOldValue(), "[test pattern modified]");
+ assertEquals(event.getNewValue(), metaConfVal);
+ }
+
+ /**
+ * Setting a meta-conf key to its existing value must NOT produce an extra
+ * ConfigChangeEvent when the client shuts down.
+ */
+ @Test
+ public void testMetaConfDuplicateNotification() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, metaConfVal);
+ int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+ closingClient.close();
+
+ Thread.sleep(2 * 1000);
+
+ int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+ // Setting key to same value, should not trigger configChange event during shutdown
+ assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
+ }
+
+ /**
+ * The ConfigChangeEvent fired on client close must carry the same HMS handler instance
+ * as the event fired on the original setMetaConf call.
+ */
+ @Test
+ public void testMetaConfSameHandler() throws Exception {
+ HiveMetaStoreClient closingClient = new HiveMetaStoreClient(conf, null);
+ closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
+ ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
+ IHMSHandler beforeHandler = event.getHandler();
+ closingClient.close();
+
+ Thread.sleep(2 * 1000);
+ event = (ConfigChangeEvent) DummyListener.getLastEvent();
+ int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
+ IHMSHandler afterHandler = event.getHandler();
+ // Meta-conf cleanup should trigger an event to listener
+ // NOTE(review): assertNotSame on two autoboxed ints checks reference identity and only
+ // works here because of the small-Integer cache; Assert.assertNotEquals would state the
+ // intent (counts differ) correctly.
+ assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts)
+ // Both the handlers should be same
+ assertEquals(beforeHandler, afterHandler);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
new file mode 100644
index 0000000..f692b0a
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * TestMetaStoreInitListener. Test case for
+ * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener}
+ */
+public class TestMetaStoreInitListener {
+ // Conf used to start the embedded metastore; retained in case future tests need a client.
+ private Configuration conf;
+
+ @Before
+ public void setUp() throws Exception {
+ // Register the init hook via the old hive.* system property so HMSHandler picks it up
+ // during initialization, before the store is started.
+ System.setProperty("hive.metastore.init.hooks", DummyMetaStoreInitListener.class.getName());
+ conf = MetastoreConf.newMetastoreConf();
+ MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 3);
+ MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.startMetaStoreWithRetry(HadoopThriftAuthBridge.getBridge(), conf);
+ MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS, "thrift://localhost:" + port);
+ }
+
+ @Test
+ public void testMetaStoreInitListener() throws Exception {
+ // DummyMetaStoreInitListener's onInit will be called at HMSHandler
+ // initialization, and set this static flag to true
+ Assert.assertTrue(DummyMetaStoreInitListener.wasCalled);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
new file mode 100644
index 0000000..3fdce48
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test for unwrapping InvocationTargetException, which is thrown from
+ * constructor of listener class
+ */
+public class TestMetaStoreListenersError {
+
+ /**
+ * An init hook whose constructor throws must abort metastore startup with a MetaException
+ * that unwraps the InvocationTargetException and names the failing listener class.
+ */
+ @Test
+ public void testInitListenerException() throws Throwable {
+
+ System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName());
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.findFreePort();
+ try {
+ HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+ Assert.fail();
+ } catch (Throwable throwable) {
+ // Must be the unwrapped MetaException, not the reflective InvocationTargetException.
+ Assert.assertEquals(MetaException.class, throwable.getClass());
+ Assert.assertEquals(
+ "Failed to instantiate listener named: " +
+ "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorInitListener, " +
+ "reason: java.lang.IllegalArgumentException: exception on constructor",
+ throwable.getMessage());
+ }
+ }
+
+ /**
+ * Same unwrapping contract for an event listener whose constructor throws; the init-hooks
+ * property is cleared first so the previous test's hook cannot interfere.
+ */
+ @Test
+ public void testEventListenerException() throws Throwable {
+
+ System.setProperty("hive.metastore.init.hooks", "");
+ System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName());
+ Configuration conf = MetastoreConf.newMetastoreConf();
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+ int port = MetaStoreTestUtils.findFreePort();
+ try {
+ HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge(), conf);
+ Assert.fail();
+ } catch (Throwable throwable) {
+ Assert.assertEquals(MetaException.class, throwable.getClass());
+ Assert.assertEquals(
+ "Failed to instantiate listener named: " +
+ "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorEventListener, " +
+ "reason: java.lang.IllegalArgumentException: exception on constructor",
+ throwable.getMessage());
+ }
+ }
+
+ /** Init hook that always fails in its constructor, to simulate a broken listener. */
+ public static class ErrorInitListener extends MetaStoreInitListener {
+
+ public ErrorInitListener(Configuration config) {
+ super(config);
+ throw new IllegalArgumentException("exception on constructor");
+ }
+
+ public void onInit(MetaStoreInitContext context) throws MetaException {
+ }
+ }
+
+ /** Event listener that always fails in its constructor, to simulate a broken listener. */
+ public static class ErrorEventListener extends MetaStoreEventListener {
+
+ public ErrorEventListener(Configuration config) {
+ super(config);
+ throw new IllegalArgumentException("exception on constructor");
+ }
+ }
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 24ea62e..372dee6 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -129,8 +129,7 @@ public class TestObjectStore {
@Before
public void setUp() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
objectStore = new ObjectStore();
objectStore.setConf(conf);
@@ -462,8 +461,7 @@ public class TestObjectStore {
String value1 = "another_value";
Assume.assumeTrue(System.getProperty(key) == null);
Configuration localConf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(localConf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
+ MetaStoreTestUtils.setConfForStandloneMode(localConf);
localConf.set(key, value);
localConf.set(key1, value1);
objectStore = new ObjectStore();
@@ -537,8 +535,7 @@ public class TestObjectStore {
.debug(NUM_THREADS + " threads going to add notification"));
Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setVar(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class.getName());
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
/*
Below are the properties that need to be set based on what database this test is going to be run
*/
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
new file mode 100644
index 0000000..b4e5a85
--- /dev/null
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.concurrent.TimeUnit;
+
+import javax.jdo.JDOCanRetryException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+public class TestObjectStoreInitRetry {
+ private static final Logger LOG = LoggerFactory.getLogger(TestObjectStoreInitRetry.class);
+
+ // Remaining number of simulated connection failures; decremented each time misbehave()
+ // throws. FakeDerby calls back into this class to inject the failures.
+ private static int injectConnectFailure = 0;
+
+ private static void setInjectConnectFailure(int x){
+ injectConnectFailure = x;
+ }
+
+ private static int getInjectConnectFailure(){
+ return injectConnectFailure;
+ }
+
+ private static void decrementInjectConnectFailure(){
+ injectConnectFailure--;
+ }
+
+ @BeforeClass
+ public static void oneTimeSetup() throws SQLException {
+ // dummy instantiation to make sure any static/ctor code blocks of that
+ // driver are loaded and ready to go.
+ DriverManager.registerDriver(new FakeDerby());
+ }
+
+ @AfterClass
+ public static void oneTimeTearDown() throws SQLException {
+ // Deregistration matches by equality, so a fresh FakeDerby instance suffices
+ // to remove the one registered in oneTimeSetup.
+ DriverManager.deregisterDriver(new FakeDerby());
+ }
+
+ /**
+ * Failure-injection hook invoked by FakeDerby: throws a retryable JDO exception while
+ * the injected-failure budget is positive, then behaves normally.
+ */
+ static void misbehave() throws RuntimeException{
+ TestObjectStoreInitRetry.debugTrace();
+ if (TestObjectStoreInitRetry.getInjectConnectFailure() > 0){
+ TestObjectStoreInitRetry.decrementInjectConnectFailure();
+ RuntimeException re = new JDOCanRetryException();
+ LOG.debug("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure(), re);
+ throw re;
+ }
+ }
+
+ // debug instrumenter - useful in finding which fns get called, and how often
+ static void debugTrace() {
+ if (LOG.isDebugEnabled()){
+ // Stack frame [1] is misbehave()'s caller; log its line number plus the failure budget.
+ Exception e = new Exception();
+ LOG.debug("." + e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure());
+ }
+ }
+
+ protected static Configuration conf;
+
+ /**
+ * ObjectStore.setConf with 4 handler attempts against 5 injected failures must fail
+ * (one failure left over); with no injected failures it must succeed.
+ */
+ @Test
+ public void testObjStoreRetry() throws Exception {
+ conf = MetastoreConf.newMetastoreConf();
+
+ MetastoreConf.setLongVar(conf, ConfVars.HMSHANDLERATTEMPTS, 4);
+ MetastoreConf.setTimeVar(conf, ConfVars.HMSHANDLERINTERVAL, 1, TimeUnit.SECONDS);
+ MetastoreConf.setVar(conf, ConfVars.CONNECTION_DRIVER,FakeDerby.class.getName());
+ MetastoreConf.setBoolVar(conf, ConfVars.TRY_DIRECT_SQL,true);
+ // Route the JDBC URL to the failure-injecting driver ("jdbc:fderby:..." instead of derby).
+ String jdbcUrl = MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY);
+ jdbcUrl = jdbcUrl.replace("derby","fderby");
+ MetastoreConf.setVar(conf, ConfVars.CONNECTURLKEY,jdbcUrl);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
+
+ FakeDerby fd = new FakeDerby();
+
+ ObjectStore objStore = new ObjectStore();
+
+ Exception savE = null;
+ try {
+ setInjectConnectFailure(5);
+ objStore.setConf(conf);
+ Assert.fail();
+ } catch (Exception e) {
+ LOG.info("Caught exception ", e);
+ savE = e;
+ }
+
+ /*
+ * A note on retries.
+ *
+ * We've configured a total of 4 attempts.
+ * 5 - 4 == 1 connect failure simulation count left after this.
+ */
+
+ assertEquals(1, getInjectConnectFailure());
+ assertNotNull(savE);
+
+ // With no injected failures the same setConf must now succeed on the first attempt.
+ setInjectConnectFailure(0);
+ objStore.setConf(conf);
+ assertEquals(0, getInjectConnectFailure());
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
index bf8556d..6a44833 100644
--- a/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
+++ b/standalone-metastore/src/test/java/org/apache/hadoop/hive/metastore/TestOldSchema.java
@@ -89,9 +89,8 @@ public class TestOldSchema {
@Before
public void setUp() throws Exception {
Configuration conf = MetastoreConf.newMetastoreConf();
- MetastoreConf.setClass(conf, MetastoreConf.ConfVars.EXPRESSION_PROXY_CLASS,
- MockPartitionExpressionProxy.class, PartitionExpressionProxy.class);
MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.STATS_FETCH_BITVECTOR, false);
+ MetaStoreTestUtils.setConfForStandloneMode(conf);
store = new ObjectStore();
store.setConf(conf);