Posted to commits@hive.apache.org by ga...@apache.org on 2018/01/16 21:36:33 UTC

[5/7] hive git commit: HIVE-17982 Move metastore specific itests. This closes #279. (Alan Gates, reviewed by Peter Vary)

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
deleted file mode 100644
index a19cc86..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreTxns.java
+++ /dev/null
@@ -1,270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.Assert;
-
-import org.apache.hadoop.hive.common.ValidTxnList;
-import org.apache.hadoop.hive.common.ValidReadTxnList;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.DataOperationType;
-import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
-import org.apache.hadoop.hive.metastore.api.LockResponse;
-import org.apache.hadoop.hive.metastore.api.LockState;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
-import org.apache.log4j.Level;
-import org.apache.log4j.LogManager;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-
-/**
- * Unit tests for {@link org.apache.hadoop.hive.metastore.HiveMetaStoreClient}.  For now this covers
- * only transaction and locking tests.  The goal here is not to test all
- * functionality possible through the interface, as all permutations of DB
- * operations should be tested in the appropriate DB handler classes.  The
- * goal is to test that we can properly pass the messages through the Thrift
- * service.
- *
- * This is in the ql directory rather than the metastore directory because it
- * requires the hive-exec jar, and the hive-exec jar already depends on the
- * hive-metastore jar, so hive-metastore cannot depend on hive-exec.
- */
-public class TestHiveMetaStoreTxns {
-
-  private final HiveConf conf = new HiveConf();
-  private IMetaStoreClient client;
-
-  public TestHiveMetaStoreTxns() throws Exception {
-    TxnDbUtil.setConfValues(conf);
-    LogManager.getRootLogger().setLevel(Level.DEBUG);
-    tearDown();
-  }
-
-  @Test
-  public void testTxns() throws Exception {
-    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
-    Assert.assertEquals(1L, (long) tids.get(0));
-    Assert.assertEquals(2L, (long) tids.get(1));
-    Assert.assertEquals(3L, (long) tids.get(2));
-    client.rollbackTxn(1);
-    client.commitTxn(2);
-    ValidTxnList validTxns = client.getValidTxns();
-    Assert.assertFalse(validTxns.isTxnValid(1));
-    Assert.assertTrue(validTxns.isTxnValid(2));
-    Assert.assertFalse(validTxns.isTxnValid(3));
-    Assert.assertFalse(validTxns.isTxnValid(4));
-  }
-
-  @Test
-  public void testOpenTxnNotExcluded() throws Exception {
-    List<Long> tids = client.openTxns("me", 3).getTxn_ids();
-    Assert.assertEquals(1L, (long) tids.get(0));
-    Assert.assertEquals(2L, (long) tids.get(1));
-    Assert.assertEquals(3L, (long) tids.get(2));
-    client.rollbackTxn(1);
-    client.commitTxn(2);
-    ValidTxnList validTxns = client.getValidTxns(3);
-    Assert.assertFalse(validTxns.isTxnValid(1));
-    Assert.assertTrue(validTxns.isTxnValid(2));
-    Assert.assertTrue(validTxns.isTxnValid(3));
-    Assert.assertFalse(validTxns.isTxnValid(4));
-  }
-
-  @Test
-  public void testTxnRange() throws Exception {
-    ValidTxnList validTxns = client.getValidTxns();
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(1L, 3L));
-    List<Long> tids = client.openTxns("me", 5).getTxn_ids();
-
-    HeartbeatTxnRangeResponse rsp = client.heartbeatTxnRange(1, 5);
-    Assert.assertEquals(0, rsp.getNosuch().size());
-    Assert.assertEquals(0, rsp.getAborted().size());
-
-    client.rollbackTxn(1L);
-    client.commitTxn(2L);
-    client.commitTxn(3L);
-    client.commitTxn(4L);
-    validTxns = client.getValidTxns();
-    System.out.println("validTxns = " + validTxns);
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 2L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 3L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(2L, 4L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(3L, 4L));
-
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1L, 4L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(2L, 5L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1L, 2L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(4L, 5L));
-
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(1L, 1L));
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(5L, 10L));
-
-    validTxns = new ValidReadTxnList("10:5:4,5,6:");
-    Assert.assertEquals(ValidTxnList.RangeResponse.NONE,
-        validTxns.isTxnRangeValid(4,6));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(7, 10));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(7, 11));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(3, 6));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(4, 7));
-    Assert.assertEquals(ValidTxnList.RangeResponse.SOME,
-        validTxns.isTxnRangeValid(1, 12));
-    Assert.assertEquals(ValidTxnList.RangeResponse.ALL,
-        validTxns.isTxnRangeValid(1, 3));
-  }
-
-  @Test
-  public void testLocks() throws Exception {
-    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("mytable")
-        .setPartitionName("mypartition")
-        .setExclusive()
-        .setOperationType(DataOperationType.NO_TXN)
-        .build());
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("yourtable")
-        .setSemiShared()
-        .setOperationType(DataOperationType.NO_TXN)
-        .build());
-    rqstBuilder.addLockComponent(new LockComponentBuilder()
-        .setDbName("yourdb")
-        .setOperationType(DataOperationType.NO_TXN)
-        .setShared()
-        .build());
-    rqstBuilder.setUser("fred");
-
-    LockResponse res = client.lock(rqstBuilder.build());
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    res = client.checkLock(1);
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    client.heartbeat(0, 1);
-
-    client.unlock(1);
-  }
-
-  @Test
-  public void testLocksWithTxn() throws Exception {
-    long txnid = client.openTxn("me");
-
-    LockRequestBuilder rqstBuilder = new LockRequestBuilder();
-    rqstBuilder.setTransactionId(txnid)
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("mytable")
-        .setPartitionName("mypartition")
-        .setSemiShared()
-        .setOperationType(DataOperationType.UPDATE)
-        .build())
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("mydb")
-        .setTableName("yourtable")
-        .setSemiShared()
-        .setOperationType(DataOperationType.UPDATE)
-        .build())
-      .addLockComponent(new LockComponentBuilder()
-        .setDbName("yourdb")
-        .setShared()
-        .setOperationType(DataOperationType.SELECT)
-        .build())
-      .setUser("fred");
-
-    LockResponse res = client.lock(rqstBuilder.build());
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    res = client.checkLock(1);
-    Assert.assertEquals(1L, res.getLockid());
-    Assert.assertEquals(LockState.ACQUIRED, res.getState());
-
-    client.heartbeat(txnid, 1);
-
-    client.commitTxn(txnid);
-  }
-
-  @Test
-  public void stringifyValidTxns() throws Exception {
-    // Test with just high water mark
-    ValidTxnList validTxns = new ValidReadTxnList("1:" + Long.MAX_VALUE + "::");
-    String asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(1, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
-    asString = validTxns.toString();
-    Assert.assertEquals("1:" + Long.MAX_VALUE + "::", asString);
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(1, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(0, validTxns.getInvalidTransactions().length);
-
-    // Test with open transactions
-    validTxns = new ValidReadTxnList("10:3:5:3");
-    asString = validTxns.toString();
-    if (!asString.equals("10:3:3:5") && !asString.equals("10:3:5:3")) {
-      Assert.fail("Unexpected string value " + asString);
-    }
-    validTxns = new ValidReadTxnList(asString);
-    Assert.assertEquals(10, validTxns.getHighWatermark());
-    Assert.assertNotNull(validTxns.getInvalidTransactions());
-    Assert.assertEquals(2, validTxns.getInvalidTransactions().length);
-    boolean sawThree = false, sawFive = false;
-    for (long tid : validTxns.getInvalidTransactions()) {
-      if (tid == 3)  sawThree = true;
-      else if (tid == 5) sawFive = true;
-      else  Assert.fail("Unexpected value " + tid);
-    }
-    Assert.assertTrue(sawThree);
-    Assert.assertTrue(sawFive);
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    TxnDbUtil.prepDb(conf);
-    client = new HiveMetaStoreClient(conf);
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    TxnDbUtil.cleanDb(conf);
-  }
-}
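
A note on the ValidTxnList string format exercised above: the assertions in
testTxnRange and stringifyValidTxns imply a layout of
highWatermark:minOpenTxn:openTxnIds:abortedTxnIds, where either id list may be
empty and the second field appears to track the lowest open transaction id.
Below is a minimal sketch using only calls the deleted test itself makes (the
class name ValidTxnListSketch is invented for illustration):

    import org.apache.hadoop.hive.common.ValidReadTxnList;
    import org.apache.hadoop.hive.common.ValidTxnList;

    public class ValidTxnListSketch {
      public static void main(String[] args) {
        // "10:5:4,5,6:" read as: high watermark 10, open txns {4,5,6},
        // no aborted txns. This interpretation is inferred from the
        // assertions in testTxnRange above.
        ValidTxnList validTxns = new ValidReadTxnList("10:5:4,5,6:");
        System.out.println(validTxns.isTxnValid(7));          // true: committed and <= high watermark
        System.out.println(validTxns.isTxnValid(5));          // false: open
        System.out.println(validTxns.isTxnValid(11));         // false: above the high watermark
        System.out.println(validTxns.isTxnRangeValid(7, 10)); // ALL
        System.out.println(validTxns.isTxnRangeValid(4, 6));  // NONE
        System.out.println(validTxns.isTxnRangeValid(1, 12)); // SOME
      }
    }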

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
deleted file mode 100644
index c29a34d..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestHiveMetaStoreWithEnvironmentContext.java
+++ /dev/null
@@ -1,219 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.SerDeInfo;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-
-/**
- * TestHiveMetaStoreWithEnvironmentContext. Test case for the _with_environment_context
- * calls in {@link org.apache.hadoop.hive.metastore.HiveMetaStore}.
- */
-public class TestHiveMetaStoreWithEnvironmentContext extends TestCase {
-
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private EnvironmentContext envContext;
-  private final Database db = new Database();
-  private Table table = new Table();
-  private final Partition partition = new Partition();
-
-  private static final String dbName = "hive3252";
-  private static final String tblName = "tmptbl";
-  private static final String renamed = "tmptbl2";
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-
-    msc.dropDatabase(dbName, true, true);
-
-    Map<String, String> envProperties = new HashMap<String, String>();
-    envProperties.put("hadoop.job.ugi", "test_user");
-    envContext = new EnvironmentContext(envProperties);
-
-    db.setName(dbName);
-
-    Map<String, String> tableParams = new HashMap<String, String>();
-    tableParams.put("a", "string");
-    List<FieldSchema> partitionKeys = new ArrayList<FieldSchema>();
-    partitionKeys.add(new FieldSchema("b", "string", ""));
-
-    List<FieldSchema> cols = new ArrayList<FieldSchema>();
-    cols.add(new FieldSchema("a", "string", ""));
-    cols.add(new FieldSchema("b", "string", ""));
-    StorageDescriptor sd = new StorageDescriptor();
-    sd.setCols(cols);
-    sd.setCompressed(false);
-    sd.setParameters(tableParams);
-    sd.setSerdeInfo(new SerDeInfo());
-    sd.getSerdeInfo().setName(tblName);
-    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
-    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
-    sd.getSerdeInfo().setSerializationLib(LazySimpleSerDe.class.getName());
-    sd.setInputFormat(HiveInputFormat.class.getName());
-    sd.setOutputFormat(HiveOutputFormat.class.getName());
-
-    table.setDbName(dbName);
-    table.setTableName(tblName);
-    table.setParameters(tableParams);
-    table.setPartitionKeys(partitionKeys);
-    table.setSd(sd);
-
-    List<String> partValues = new ArrayList<String>();
-    partValues.add("2011");
-    partition.setDbName(dbName);
-    partition.setTableName(tblName);
-    partition.setValues(partValues);
-    partition.setSd(table.getSd().deepCopy());
-    partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo().deepCopy());
-
-    DummyListener.notifyList.clear();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testEnvironmentContext() throws Exception {
-    int listSize = 0;
-
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    msc.createDatabase(db);
-    listSize++;
-    assertEquals(listSize, notifyList.size());
-    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
-    assert dbEvent.getStatus();
-
-    msc.createTable(table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
-    assert tblEvent.getStatus();
-    assertEquals(envContext, tblEvent.getEnvironmentContext());
-
-    table = msc.getTable(dbName, tblName);
-
-    partition.getSd().setLocation(table.getSd().getLocation() + "/part1");
-    msc.add_partition(partition, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert partEvent.getStatus();
-    assertEquals(envContext, partEvent.getEnvironmentContext());
-
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("2012");
-    msc.appendPartition(dbName, tblName, partVals, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AddPartitionEvent appendPartEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert appendPartEvent.getStatus();
-    assertEquals(envContext, appendPartEvent.getEnvironmentContext());
-
-    table.setTableName(renamed);
-    msc.alter_table_with_environmentContext(dbName, tblName, table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    AlterTableEvent alterTableEvent = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableEvent.getStatus();
-    assertEquals(envContext, alterTableEvent.getEnvironmentContext());
-
-    table.setTableName(tblName);
-    msc.alter_table_with_environmentContext(dbName, renamed, table, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    List<String> dropPartVals = new ArrayList<String>();
-    dropPartVals.add("2011");
-    msc.dropPartition(dbName, tblName, dropPartVals, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropPartitionEvent dropPartEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPartEvent.getStatus();
-    assertEquals(envContext, dropPartEvent.getEnvironmentContext());
-
-    msc.dropPartition(dbName, tblName, "b=2012", true, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropPartitionEvent dropPartByNameEvent = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPartByNameEvent.getStatus();
-    assertEquals(envContext, dropPartByNameEvent.getEnvironmentContext());
-
-    msc.dropTable(dbName, tblName, true, false, envContext);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    DropTableEvent dropTblEvent = (DropTableEvent)notifyList.get(listSize-1);
-    assert dropTblEvent.getStatus();
-    assertEquals(envContext, dropTblEvent.getEnvironmentContext());
-
-    msc.dropDatabase(dbName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
-    assert dropDB.getStatus();
-  }
-
-}
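
The pattern this test exercises, attaching an EnvironmentContext to metastore
calls so that listeners can observe caller-supplied properties, reduces to a
short sketch. All calls below appear in the deleted test; only the class name
EnvContextSketch is invented:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class EnvContextSketch {
      public static void createWithContext(HiveConf conf, Table table) throws Exception {
        HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
        // Properties placed in the context ride along with the Thrift call
        // and surface on the server-side event via getEnvironmentContext().
        Map<String, String> props = new HashMap<String, String>();
        props.put("hadoop.job.ugi", "test_user");
        EnvironmentContext envContext = new EnvironmentContext(props);
        msc.createTable(table, envContext);
      }
    }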

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
deleted file mode 100644
index 7b3a896..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.UnknownDBException;
-import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
-import org.apache.hadoop.hive.metastore.api.UnknownTableException;
-import org.apache.hadoop.hive.ql.CommandNeedRetryException;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.thrift.TException;
-
-public class TestMarkPartition extends TestCase {
-
-  protected HiveConf hiveConf;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.event.clean.freq", "2");
-    System.setProperty("hive.metastore.event.expiry.duration", "5");
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-
-  }
-
-  public void testMarkingPartitionSet() throws CommandNeedRetryException, MetaException,
-  TException, NoSuchObjectException, UnknownDBException, UnknownTableException,
-  InvalidPartitionException, UnknownPartitionException, InterruptedException {
-    HiveMetaStoreClient msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-    driver.run("drop database if exists hive2215 cascade");
-    driver.run("create database hive2215");
-    driver.run("use hive2215");
-    driver.run("drop table if exists tmptbl");
-    driver.run("create table tmptbl (a string) partitioned by (b string)");
-    driver.run("alter table tmptbl add partition (b='2011')");
-    Map<String,String> kvs = new HashMap<String, String>();
-    kvs.put("b", "'2011'");
-    msc.markPartitionForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    assert msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    Thread.sleep(10000); // outlast the 5s event expiry and 2s clean interval configured in setUp
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-
-    kvs.put("b", "'2012'");
-    assert !msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    try {
-      msc.markPartitionForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch (Exception e) {
-      assert e instanceof UnknownTableException;
-    }
-    try {
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl2", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch (Exception e) {
-      assert e instanceof UnknownTableException;
-    }
-    kvs.put("a", "'2012'");
-    try {
-      msc.isPartitionMarkedForEvent("hive2215", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-      assert false;
-    } catch (Exception e) {
-      assert e instanceof InvalidPartitionException;
-    }
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    driver.run("drop database if exists hive2215 cascade");
-    super.tearDown();
-  }
-
-}
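
The core API under test here is small enough to show directly. A minimal
sketch, assuming a client already configured against the metastore (the class
name MarkPartitionSketch is invented; the db/table names and partition spec
come from the test above):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PartitionEventType;

    public class MarkPartitionSketch {
      public static boolean markAndCheck(HiveConf conf) throws Exception {
        HiveMetaStoreClient msc = new HiveMetaStoreClient(conf);
        Map<String, String> partKVs = new HashMap<String, String>();
        partKVs.put("b", "'2011'");
        // The mark is stored server side and ages out according to
        // hive.metastore.event.expiry.duration (5 seconds in this test).
        msc.markPartitionForEvent("hive2215", "tmptbl", partKVs, PartitionEventType.LOAD_DONE);
        return msc.isPartitionMarkedForEvent("hive2215", "tmptbl", partKVs, PartitionEventType.LOAD_DONE);
      }
    }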

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
deleted file mode 100644
index c541193..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartitionRemote.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-public class TestMarkPartitionRemote extends TestMarkPartition {
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + MetaStoreTestUtils.startMetaStore());
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-  }
-}
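
TestMarkPartitionRemote reruns the same assertions against a standalone Thrift
metastore rather than the embedded one; the only difference is pointing the
client at a thrift:// URI. A minimal sketch of that switch, assuming a
metastore is already listening on the given port (RemoteClientSketch is an
invented name):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

    public class RemoteClientSketch {
      public static HiveMetaStoreClient connect(int port) throws Exception {
        HiveConf conf = new HiveConf();
        // With METASTOREURIS set the client speaks Thrift to the remote
        // server; left unset, the metastore runs embedded in-process.
        conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
        conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
        return new HiveMetaStoreClient(conf);
      }
    }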

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
deleted file mode 100644
index 1ca18b9..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEndFunctionListener.java
+++ /dev/null
@@ -1,142 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import junit.framework.TestCase;
-
-/**
- * TestMetaStoreEndFunctionListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener}.
- */
-public class TestMetaStoreEndFunctionListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-    System.setProperty("hive.metastore.pre.event.listeners",
-        DummyPreListener.class.getName());
-    System.setProperty("hive.metastore.end.function.listeners",
-        DummyEndFunctionListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testEndFunctionListener() throws Exception {
-    /* The objective here is to ensure that exceptions thrown by HiveMetaStore API methods
-     * bubble up and are recorded in the MetaStoreEndFunctionContext objects.
-     */
-    String dbName = "hive3524";
-    String tblName = "tmptbl";
-    int listSize = 0;
-
-    driver.run("create database " + dbName);
-
-    try {
-      msc.getDatabase("UnknownDB");
-    }
-    catch (Exception e) { // expected: the database does not exist
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    String func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
-    MetaStoreEndFunctionContext context = DummyEndFunctionListener.contextList.get(listSize - 1);
-    assertEquals("get_database", func_name);
-    assertFalse(context.isSuccess());
-    Exception e = context.getException();
-    assertNotNull(e);
-    assertTrue(e instanceof NoSuchObjectException);
-    assertNull(context.getInputTableName());
-
-    driver.run("use " + dbName);
-    driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
-    String tableName = "Unknown";
-    try {
-      msc.getTable(dbName, tableName);
-    }
-    catch (Exception e1) { // expected: the table does not exist
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
-    context = DummyEndFunctionListener.contextList.get(listSize - 1);
-    assertEquals("get_table", func_name);
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertNotNull(e);
-    assertTrue(e instanceof NoSuchObjectException);
-    assertEquals(tableName, context.getInputTableName());
-
-    try {
-      msc.getPartition("hive3524", tblName, "b=2012");
-    }
-    catch (Exception e2) { // expected: the partition does not exist
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
-    context = DummyEndFunctionListener.contextList.get(listSize - 1);
-    assertEquals("get_partition_by_name", func_name);
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertNotNull(e);
-    assertTrue(e instanceof NoSuchObjectException);
-    assertEquals(tblName, context.getInputTableName());
-    try {
-      driver.run("drop table Unknown");
-    }
-    catch (Exception e4) { // ignored; the failed get_table is what the listener records
-    }
-    listSize = DummyEndFunctionListener.funcNameList.size();
-    func_name = DummyEndFunctionListener.funcNameList.get(listSize - 1);
-    context = DummyEndFunctionListener.contextList.get(listSize - 1);
-    assertEquals("get_table", func_name);
-    assertFalse(context.isSuccess());
-    e = context.getException();
-    assertNotNull(e);
-    assertTrue(e instanceof NoSuchObjectException);
-    assertEquals("Unknown", context.getInputTableName());
-
-  }
-
-}
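
For context on DummyEndFunctionListener: end-function listeners are named in
hive.metastore.end.function.listeners and are invoked by the server after
every API call, success or failure. A sketch of a recording listener like the
one this test consults follows; the onEndFunction signature is assumed from
its use above (a function name plus a context exposing isSuccess(),
getException() and getInputTableName()), and RecordingEndFunctionListener is
an invented name:

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionContext;
    import org.apache.hadoop.hive.metastore.MetaStoreEndFunctionListener;

    public class RecordingEndFunctionListener extends MetaStoreEndFunctionListener {
      public static final List<String> funcNameList = new ArrayList<String>();
      public static final List<MetaStoreEndFunctionContext> contextList =
          new ArrayList<MetaStoreEndFunctionContext>();

      public RecordingEndFunctionListener(Configuration config) {
        super(config);
      }

      @Override
      public void onEndFunction(String functionName, MetaStoreEndFunctionContext context) {
        // Record every completed metastore call so tests can assert on the
        // last function name and its outcome.
        funcNameList.add(functionName);
        contextList.add(context);
      }
    }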

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
deleted file mode 100644
index 358e5d1..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListener.java
+++ /dev/null
@@ -1,526 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionEventType;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.events.AddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
-import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.DropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.DropTableEvent;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
-import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
-import org.apache.hadoop.hive.metastore.events.PreEventContext;
-import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.processors.SetProcessor;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import com.google.common.collect.Lists;
-
-import junit.framework.TestCase;
-
-/**
- * TestMetaStoreEventListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreEventListener} and
- * {@link org.apache.hadoop.hive.metastore.MetaStorePreEventListener}
- */
-public class TestMetaStoreEventListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  private static final String dbName = "hive2038";
-  private static final String tblName = "tmptbl";
-  private static final String renamed = "tmptbl2";
-  private static final String metaConfKey = "hive.metastore.partition.name.whitelist.pattern";
-  private static final String metaConfVal = "";
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-
-    System.setProperty("hive.metastore.event.listeners",
-        DummyListener.class.getName());
-    System.setProperty("hive.metastore.pre.event.listeners",
-        DummyPreListener.class.getName());
-
-    hiveConf = new HiveConf(this.getClass());
-
-    hiveConf.setVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, metaConfVal);
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.set(HiveConf.ConfVars.HIVE_TXN_MANAGER.varname,
-        "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-
-    driver.run("drop database if exists " + dbName + " cascade");
-
-    DummyListener.notifyList.clear();
-    DummyPreListener.notifyList.clear();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  private void validateCreateDb(Database expectedDb, Database actualDb) {
-    assertEquals(expectedDb.getName(), actualDb.getName());
-    assertEquals(expectedDb.getLocationUri(), actualDb.getLocationUri());
-  }
-
-  private void validateTable(Table expectedTable, Table actualTable) {
-    assertEquals(expectedTable.getTableName(), actualTable.getTableName());
-    assertEquals(expectedTable.getDbName(), actualTable.getDbName());
-    assertEquals(expectedTable.getSd().getLocation(), actualTable.getSd().getLocation());
-  }
-
-  private void validateCreateTable(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateAddPartition(Partition expectedPartition, Partition actualPartition) {
-    assertEquals(expectedPartition, actualPartition);
-  }
-
-  private void validateTableInAddPartition(Table expectedTable, Table actualTable) {
-    assertEquals(expectedTable, actualTable);
-  }
-
-  private void validatePartition(Partition expectedPartition, Partition actualPartition) {
-    assertEquals(expectedPartition.getValues(), actualPartition.getValues());
-    assertEquals(expectedPartition.getDbName(), actualPartition.getDbName());
-    assertEquals(expectedPartition.getTableName(), actualPartition.getTableName());
-  }
-
-  private void validateAlterPartition(Partition expectedOldPartition,
-      Partition expectedNewPartition, String actualOldPartitionDbName,
-      String actualOldPartitionTblName,List<String> actualOldPartitionValues,
-      Partition actualNewPartition) {
-    assertEquals(expectedOldPartition.getValues(), actualOldPartitionValues);
-    assertEquals(expectedOldPartition.getDbName(), actualOldPartitionDbName);
-    assertEquals(expectedOldPartition.getTableName(), actualOldPartitionTblName);
-
-    validatePartition(expectedNewPartition, actualNewPartition);
-  }
-
-  private void validateAlterTable(Table expectedOldTable, Table expectedNewTable,
-      Table actualOldTable, Table actualNewTable) {
-    validateTable(expectedOldTable, actualOldTable);
-    validateTable(expectedNewTable, actualNewTable);
-  }
-
-  private void validateAlterTableColumns(Table expectedOldTable, Table expectedNewTable,
-      Table actualOldTable, Table actualNewTable) {
-    validateAlterTable(expectedOldTable, expectedNewTable, actualOldTable, actualNewTable);
-
-    assertEquals(expectedOldTable.getSd().getCols(), actualOldTable.getSd().getCols());
-    assertEquals(expectedNewTable.getSd().getCols(), actualNewTable.getSd().getCols());
-  }
-
-  private void validateLoadPartitionDone(String expectedTableName,
-      Map<String,String> expectedPartitionName, String actualTableName,
-      Map<String,String> actualPartitionName) {
-    assertEquals(expectedPartitionName, actualPartitionName);
-    assertEquals(expectedTableName, actualTableName);
-  }
-
-  private void validateDropPartition(Iterator<Partition> expectedPartitions, Iterator<Partition> actualPartitions) {
-    while (expectedPartitions.hasNext()){
-      assertTrue(actualPartitions.hasNext());
-      validatePartition(expectedPartitions.next(), actualPartitions.next());
-    }
-    assertFalse(actualPartitions.hasNext());
-  }
-
-  private void validateTableInDropPartition(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateDropTable(Table expectedTable, Table actualTable) {
-    validateTable(expectedTable, actualTable);
-  }
-
-  private void validateDropDb(Database expectedDb, Database actualDb) {
-    assertEquals(expectedDb, actualDb);
-  }
-
-  private void validateIndex(Index expectedIndex, Index actualIndex) {
-    assertEquals(expectedIndex.getDbName(), actualIndex.getDbName());
-    assertEquals(expectedIndex.getIndexName(), actualIndex.getIndexName());
-    assertEquals(expectedIndex.getIndexHandlerClass(), actualIndex.getIndexHandlerClass());
-    assertEquals(expectedIndex.getOrigTableName(), actualIndex.getOrigTableName());
-    assertEquals(expectedIndex.getIndexTableName(), actualIndex.getIndexTableName());
-    assertEquals(expectedIndex.getSd().getLocation(), actualIndex.getSd().getLocation());
-  }
-
-  private void validateAddIndex(Index expectedIndex, Index actualIndex) {
-    validateIndex(expectedIndex, actualIndex);
-  }
-
-  private void validateAlterIndex(Index expectedOldIndex, Index actualOldIndex,
-      Index expectedNewIndex, Index actualNewIndex) {
-    validateIndex(expectedOldIndex, actualOldIndex);
-    validateIndex(expectedNewIndex, actualNewIndex);
-  }
-
-  private void validateDropIndex(Index expectedIndex, Index actualIndex) {
-    validateIndex(expectedIndex, actualIndex);
-  }
-
-  public void testListener() throws Exception {
-    int listSize = 0;
-
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    List<PreEventContext> preNotifyList = DummyPreListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), listSize);
-
-    driver.run("create database " + dbName);
-    listSize++;
-    PreCreateDatabaseEvent preDbEvent = (PreCreateDatabaseEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    Database db = msc.getDatabase(dbName);
-    assertEquals(listSize, notifyList.size());
-    assertEquals(listSize + 1, preNotifyList.size());
-    validateCreateDb(db, preDbEvent.getDatabase());
-
-    CreateDatabaseEvent dbEvent = (CreateDatabaseEvent)(notifyList.get(listSize - 1));
-    assert dbEvent.getStatus();
-    validateCreateDb(db, dbEvent.getDatabase());
-
-
-    driver.run("use " + dbName);
-    driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
-    PreCreateTableEvent preTblEvent = (PreCreateTableEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    listSize++;
-    Table tbl = msc.getTable(dbName, tblName);
-    validateCreateTable(tbl, preTblEvent.getTable());
-    assertEquals(notifyList.size(), listSize);
-
-    CreateTableEvent tblEvent = (CreateTableEvent)(notifyList.get(listSize - 1));
-    assert tblEvent.getStatus();
-    validateCreateTable(tbl, tblEvent.getTable());
-
-    driver.run("create index tmptbl_i on table tmptbl(a) as 'compact' " +
-        "WITH DEFERRED REBUILD IDXPROPERTIES ('prop1'='val1', 'prop2'='val2')");
-    listSize += 2;  // creates index table internally
-    assertEquals(notifyList.size(), listSize);
-
-    AddIndexEvent addIndexEvent = (AddIndexEvent)notifyList.get(listSize - 1);
-    assert addIndexEvent.getStatus();
-    PreAddIndexEvent preAddIndexEvent = (PreAddIndexEvent)(preNotifyList.get(preNotifyList.size() - 3));
-
-    Index oldIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i");
-
-    validateAddIndex(oldIndex, addIndexEvent.getIndex());
-
-    validateAddIndex(oldIndex, preAddIndexEvent.getIndex());
-
-    driver.run("alter index tmptbl_i on tmptbl set IDXPROPERTIES " +
-        "('prop1'='val1_new', 'prop3'='val3')");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    Index newIndex = msc.getIndex(dbName, "tmptbl", "tmptbl_i");
-
-    AlterIndexEvent alterIndexEvent = (AlterIndexEvent) notifyList.get(listSize - 1);
-    assert alterIndexEvent.getStatus();
-    validateAlterIndex(oldIndex, alterIndexEvent.getOldIndex(),
-        newIndex, alterIndexEvent.getNewIndex());
-
-    PreAlterIndexEvent preAlterIndexEvent = (PreAlterIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
-    validateAlterIndex(oldIndex, preAlterIndexEvent.getOldIndex(),
-        newIndex, preAlterIndexEvent.getNewIndex());
-
-    driver.run("drop index tmptbl_i on tmptbl");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    DropIndexEvent dropIndexEvent = (DropIndexEvent) notifyList.get(listSize - 1);
-    assert dropIndexEvent.getStatus();
-    validateDropIndex(newIndex, dropIndexEvent.getIndex());
-
-    PreDropIndexEvent preDropIndexEvent = (PreDropIndexEvent) (preNotifyList.get(preNotifyList.size() - 1));
-    validateDropIndex(newIndex, preDropIndexEvent.getIndex());
-
-    driver.run("alter table tmptbl add partition (b='2011')");
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAddPartitionEvent prePartEvent = (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
-
-    AddPartitionEvent partEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assert partEvent.getStatus();
-    Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
-    Partition partAdded = partEvent.getPartitionIterator().next();
-    validateAddPartition(part, partAdded);
-    validateTableInAddPartition(tbl, partEvent.getTable());
-    validateAddPartition(part, prePartEvent.getPartitions().get(0));
-
-    // Test adding multiple partitions in a single partition-set, atomically.
-    int currentTime = (int)System.currentTimeMillis();
-    HiveMetaStoreClient hmsClient = new HiveMetaStoreClient(hiveConf);
-    Table table = hmsClient.getTable(dbName, "tmptbl");
-    Partition partition1 = new Partition(Arrays.asList("20110101"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    Partition partition2 = new Partition(Arrays.asList("20110102"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    Partition partition3 = new Partition(Arrays.asList("20110103"), dbName, "tmptbl", currentTime,
-                                        currentTime, table.getSd(), table.getParameters());
-    hmsClient.add_partitions(Arrays.asList(partition1, partition2, partition3));
-    ++listSize;
-    AddPartitionEvent multiplePartitionEvent = (AddPartitionEvent)(notifyList.get(listSize-1));
-    assertEquals("Unexpected table value.", table, multiplePartitionEvent.getTable());
-    List<Partition> multiParts = Lists.newArrayList(multiplePartitionEvent.getPartitionIterator());
-    assertEquals("Unexpected number of partitions in event!", 3, multiParts.size());
-    assertEquals("Unexpected partition value.", partition1.getValues(), multiParts.get(0).getValues());
-    assertEquals("Unexpected partition value.", partition2.getValues(), multiParts.get(1).getValues());
-    assertEquals("Unexpected partition value.", partition3.getValues(), multiParts.get(2).getValues());
-
-    driver.run(String.format("alter table %s touch partition (%s)", tblName, "b='2011'"));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAlterPartitionEvent preAlterPartEvent =
-        (PreAlterPartitionEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    // The partition did not change, so the new partition in the event
-    // should match the original partition.
-    Partition origP = msc.getPartition(dbName, tblName, "b=2011");
-
-    AlterPartitionEvent alterPartEvent = (AlterPartitionEvent)notifyList.get(listSize - 1);
-    assert alterPartEvent.getStatus();
-    validateAlterPartition(origP, origP, alterPartEvent.getOldPartition().getDbName(),
-        alterPartEvent.getOldPartition().getTableName(),
-        alterPartEvent.getOldPartition().getValues(), alterPartEvent.getNewPartition());
-
-
-    validateAlterPartition(origP, origP, preAlterPartEvent.getDbName(),
-        preAlterPartEvent.getTableName(), preAlterPartEvent.getNewPartition().getValues(),
-        preAlterPartEvent.getNewPartition());
-
-    List<String> part_vals = new ArrayList<String>();
-    part_vals.add("c=2012");
-    int preEventListSize;
-    preEventListSize = preNotifyList.size() + 1;
-    Partition newPart = msc.appendPartition(dbName, tblName, part_vals);
-
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    assertEquals(preNotifyList.size(), preEventListSize);
-
-    AddPartitionEvent appendPartEvent =
-        (AddPartitionEvent)(notifyList.get(listSize-1));
-    Partition partAppended = appendPartEvent.getPartitionIterator().next();
-    validateAddPartition(newPart, partAppended);
-
-    PreAddPartitionEvent preAppendPartEvent =
-        (PreAddPartitionEvent)(preNotifyList.get(preNotifyList.size() - 1));
-    validateAddPartition(newPart, preAppendPartEvent.getPartitions().get(0));
-
-    driver.run(String.format("alter table %s rename to %s", tblName, renamed));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreAlterTableEvent preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
-
-    Table renamedTable = msc.getTable(dbName, renamed);
-
-    AlterTableEvent alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableE.getStatus();
-    validateAlterTable(tbl, renamedTable, alterTableE.getOldTable(), alterTableE.getNewTable());
-    validateAlterTable(tbl, renamedTable, preAlterTableE.getOldTable(),
-        preAlterTableE.getNewTable());
-
-    //change the table name back
-    driver.run(String.format("alter table %s rename to %s", renamed, tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    driver.run(String.format("alter table %s ADD COLUMNS (c int)", tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    preAlterTableE = (PreAlterTableEvent) preNotifyList.get(preNotifyList.size() - 1);
-
-    Table altTable = msc.getTable(dbName, tblName);
-
-    alterTableE = (AlterTableEvent) notifyList.get(listSize-1);
-    assert alterTableE.getStatus();
-    validateAlterTableColumns(tbl, altTable, alterTableE.getOldTable(), alterTableE.getNewTable());
-    validateAlterTableColumns(tbl, altTable, preAlterTableE.getOldTable(),
-        preAlterTableE.getNewTable());
-
-    Map<String,String> kvs = new HashMap<String, String>(1);
-    kvs.put("b", "2011");
-    msc.markPartitionForEvent("hive2038", "tmptbl", kvs, PartitionEventType.LOAD_DONE);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-
-    LoadPartitionDoneEvent partMarkEvent = (LoadPartitionDoneEvent)notifyList.get(listSize - 1);
-    assert partMarkEvent.getStatus();
-    validateLoadPartitionDone("tmptbl", kvs, partMarkEvent.getTable().getTableName(),
-        partMarkEvent.getPartitionName());
-
-    PreLoadPartitionDoneEvent prePartMarkEvent =
-        (PreLoadPartitionDoneEvent)preNotifyList.get(preNotifyList.size() - 1);
-    validateLoadPartitionDone("tmptbl", kvs, prePartMarkEvent.getTableName(),
-        prePartMarkEvent.getPartitionName());
-
-    driver.run(String.format("alter table %s drop partition (b='2011')", tblName));
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropPartitionEvent preDropPart = (PreDropPartitionEvent) preNotifyList.get(preNotifyList
-        .size() - 1);
-
-    DropPartitionEvent dropPart = (DropPartitionEvent)notifyList.get(listSize - 1);
-    assert dropPart.getStatus();
-    validateDropPartition(Collections.singletonList(part).iterator(), dropPart.getPartitionIterator());
-    validateTableInDropPartition(tbl, dropPart.getTable());
-
-    validateDropPartition(Collections.singletonList(part).iterator(), preDropPart.getPartitionIterator());
-    validateTableInDropPartition(tbl, preDropPart.getTable());
-
-    driver.run("drop table " + tblName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropTableEvent preDropTbl = (PreDropTableEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    DropTableEvent dropTbl = (DropTableEvent)notifyList.get(listSize-1);
-    assert dropTbl.getStatus();
-    validateDropTable(tbl, dropTbl.getTable());
-    validateDropTable(tbl, preDropTbl.getTable());
-
-    driver.run("drop database " + dbName);
-    listSize++;
-    assertEquals(notifyList.size(), listSize);
-    PreDropDatabaseEvent preDropDB = (PreDropDatabaseEvent)preNotifyList.get(preNotifyList.size() - 1);
-
-    DropDatabaseEvent dropDB = (DropDatabaseEvent)notifyList.get(listSize-1);
-    assert dropDB.getStatus();
-    validateDropDb(db, dropDB.getDatabase());
-    validateDropDb(db, preDropDB.getDatabase());
-
-    SetProcessor.setVariable("metaconf:hive.metastore.try.direct.sql", "false");
-    ConfigChangeEvent event = (ConfigChangeEvent) notifyList.get(notifyList.size() - 1);
-    assertEquals("hive.metastore.try.direct.sql", event.getKey());
-    assertEquals("true", event.getOldValue());
-    assertEquals("false", event.getNewValue());
-  }
-
-  public void testMetaConfNotifyListenersClosingClient() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), metaConfVal);
-    assertEquals(event.getNewValue(), "[test pattern modified]");
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), "[test pattern modified]");
-    assertEquals(event.getNewValue(), metaConfVal);
-  }
-
-  public void testMetaConfNotifyListenersNonClosingClient() throws Exception {
-    HiveMetaStoreClient nonClosingClient = new HiveMetaStoreClient(hiveConf, null);
-    nonClosingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), metaConfVal);
-    assertEquals(event.getNewValue(), "[test pattern modified]");
-    // This should also trigger meta listener notification via TServerEventHandler#deleteContext
-    nonClosingClient.getTTransport().close();
-
-    Thread.sleep(5 * 1000);
-
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    assertEquals(event.getOldValue(), "[test pattern modified]");
-    assertEquals(event.getNewValue(), metaConfVal);
-  }
-
-  public void testMetaConfDuplicateNotification() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, metaConfVal);
-    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-
-    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
-    // Setting the key to the same value should not trigger a configChange event during shutdown
-    assertEquals(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
-  }
-
-  public void testMetaConfSameHandler() throws Exception {
-    HiveMetaStoreClient closingClient = new HiveMetaStoreClient(hiveConf, null);
-    closingClient.setMetaConf(metaConfKey, "[test pattern modified]");
-    ConfigChangeEvent event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    int beforeCloseNotificationEventCounts = DummyListener.notifyList.size();
-    IHMSHandler beforeHandler = event.getIHMSHandler();
-    closingClient.close();
-
-    Thread.sleep(5 * 1000);
-    event = (ConfigChangeEvent) DummyListener.getLastEvent();
-    int afterCloseNotificationEventCounts = DummyListener.notifyList.size();
-    IHMSHandler afterHandler = event.getIHMSHandler();
-    // Meta-conf cleanup should trigger an event to the listener
-    assertNotSame(beforeCloseNotificationEventCounts, afterCloseNotificationEventCounts);
-    // Both handlers should be the same
-    assertEquals(beforeHandler, afterHandler);
-  }
-}
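
The four meta-conf tests above all observe server-side behavior through DummyListener, which simply records every event the metastore fires. A minimal sketch of such a recording listener, built only on the MetaStoreEventListener and ConfigChangeEvent types already visible in this diff (the class and field names here are illustrative, not the real DummyListener source):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;

    public class RecordingListener extends MetaStoreEventListener {
      // Static so the test JVM can inspect events fired inside the metastore handler.
      public static final List<ConfigChangeEvent> configEvents =
          new ArrayList<ConfigChangeEvent>();

      public RecordingListener(Configuration config) {
        super(config);
      }

      @Override
      public void onConfigChange(ConfigChangeEvent event) throws MetaException {
        configEvents.add(event);
      }
    }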

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
deleted file mode 100644
index cc2c5f9..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreEventListenerOnlyOnCommit.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.util.List;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.events.ListenerEvent;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import junit.framework.TestCase;
-
-/**
- * Ensure that the status of MetaStore events depends on the RawStore's commit status.
- */
-public class TestMetaStoreEventListenerOnlyOnCommit extends TestCase {
-
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-
-    DummyRawStoreControlledCommit.setCommitSucceed(true);
-
-    System.setProperty(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
-            DummyListener.class.getName());
-    System.setProperty(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname,
-            DummyRawStoreControlledCommit.class.getName());
-
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-
-    DummyListener.notifyList.clear();
-  }
-
-  public void testEventStatus() throws Exception {
-    int listSize = 0;
-    List<ListenerEvent> notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-
-    driver.run("CREATE DATABASE tmpDb");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("CREATE TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit (id INT) " +
-                "PARTITIONED BY (ds STRING)");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-                "ADD PARTITION(ds='foo1')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertTrue(DummyListener.getLastEvent().getStatus());
-
-    DummyRawStoreControlledCommit.setCommitSucceed(false);
-
-    driver.run("ALTER TABLE unittest_TestMetaStoreEventListenerOnlyOnCommit " +
-                "ADD PARTITION(ds='foo2')");
-    listSize += 1;
-    notifyList = DummyListener.notifyList;
-    assertEquals(notifyList.size(), listSize);
-    assertFalse(DummyListener.getLastEvent().getStatus());
-
-  }
-}
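
DummyRawStoreControlledCommit is not part of this diff, but the mechanism the test depends on is a RawStore whose commitTransaction() can be forced to report failure, so that listeners see the event with status == false. A rough sketch under that assumption, here simply extending ObjectStore rather than delegating to a full RawStore as the real class may do (names are illustrative):

    import org.apache.hadoop.hive.metastore.ObjectStore;

    public class ControlledCommitObjectStore extends ObjectStore {
      private static volatile boolean commitSucceed = true;

      public static void setCommitSucceed(boolean succeed) {
        commitSucceed = succeed;
      }

      @Override
      public boolean commitTransaction() {
        // Commit as usual, but report failure when instructed; the event
        // status asserted by testEventStatus() tracks this return value.
        boolean committed = super.commitTransaction();
        return commitSucceed && committed;
      }
    }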

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
deleted file mode 100644
index 025cc40..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreInitListener.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-
-import junit.framework.TestCase;
-
-/**
- * TestMetaStoreInitListener. Test case for
- * {@link org.apache.hadoop.hive.metastore.MetaStoreInitListener}
- */
-public class TestMetaStoreInitListener extends TestCase {
-  private HiveConf hiveConf;
-  private HiveMetaStoreClient msc;
-  private IDriver driver;
-
-  @Override
-  protected void setUp() throws Exception {
-
-    super.setUp();
-    System.setProperty("hive.metastore.init.hooks",
-        DummyMetaStoreInitListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    hiveConf = new HiveConf(this.getClass());
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    super.tearDown();
-  }
-
-  public void testMetaStoreInitListener() throws Exception {
-    // DummyMetaStoreInitListener's onInit will be called at HMSHandler
-    // initialization, and will set this flag to true
-    assertTrue(DummyMetaStoreInitListener.wasCalled);
-  }
-
-}
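
DummyMetaStoreInitListener is likewise not in this diff, but given the MetaStoreInitListener API exercised by TestMetaStoreListenersError below, its core is presumably just a static flag flipped in onInit (a sketch; the class name here is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.MetaStoreInitContext;
    import org.apache.hadoop.hive.metastore.MetaStoreInitListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class FlagSettingInitListener extends MetaStoreInitListener {
      public static volatile boolean wasCalled = false;

      public FlagSettingInitListener(Configuration config) {
        super(config);
      }

      @Override
      public void onInit(MetaStoreInitContext context) throws MetaException {
        wasCalled = true;   // observed by the test after the HMSHandler starts
      }
    }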

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
deleted file mode 100644
index 99b67bb..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreListenersError.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
-/**
- * Test for unwrapping the InvocationTargetException that is thrown from the
- * constructor of the listener class
- */
-public class TestMetaStoreListenersError extends TestCase {
-
-  public void testInitListenerException() throws Throwable {
-
-    System.setProperty("hive.metastore.init.hooks", ErrorInitListener.class.getName());
-    int port = MetaStoreTestUtils.findFreePort();
-    try {
-      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
-    } catch (Throwable throwable) {
-      Assert.assertEquals(MetaException.class, throwable.getClass());
-      Assert.assertEquals(
-          "Failed to instantiate listener named: " +
-              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorInitListener, " +
-              "reason: java.lang.IllegalArgumentException: exception on constructor",
-          throwable.getMessage());
-    }
-  }
-
-  public void testEventListenerException() throws Throwable {
-
-    System.setProperty("hive.metastore.init.hooks", "");
-    System.setProperty("hive.metastore.event.listeners", ErrorEventListener.class.getName());
-    int port = MetaStoreTestUtils.findFreePort();
-    try {
-      HiveMetaStore.startMetaStore(port, HadoopThriftAuthBridge.getBridge());
-    } catch (Throwable throwable) {
-      Assert.assertEquals(MetaException.class, throwable.getClass());
-      Assert.assertEquals(
-          "Failed to instantiate listener named: " +
-              "org.apache.hadoop.hive.metastore.TestMetaStoreListenersError$ErrorEventListener, " +
-              "reason: java.lang.IllegalArgumentException: exception on constructor",
-          throwable.getMessage());
-    }
-  }
-
-  public static class ErrorInitListener extends MetaStoreInitListener {
-
-    public ErrorInitListener(Configuration config) {
-      super(config);
-      throw new IllegalArgumentException("exception on constructor");
-    }
-
-    public void onInit(MetaStoreInitContext context) throws MetaException {
-    }
-  }
-
-  public static class ErrorEventListener extends MetaStoreEventListener {
-
-    public ErrorEventListener(Configuration config) {
-      super(config);
-      throw new IllegalArgumentException("exception on constructor");
-    }
-  }
-}
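
The assertion messages above pin down the unwrapping behavior: the listener is constructed reflectively, the constructor's IllegalArgumentException arrives wrapped in an InvocationTargetException, and the metastore surfaces the cause in a MetaException. The general shape of that pattern in isolation (a sketch, not the actual HiveMetaStore loading code):

    import java.lang.reflect.Constructor;
    import java.lang.reflect.InvocationTargetException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class ListenerLoader {
      public static <T> T instantiate(Class<T> clazz, Configuration conf) throws MetaException {
        try {
          Constructor<T> ctor = clazz.getConstructor(Configuration.class);
          return ctor.newInstance(conf);
        } catch (InvocationTargetException e) {
          // Unwrap so the message names the constructor's real exception,
          // matching the "reason: java.lang.IllegalArgumentException..." asserted above.
          throw new MetaException("Failed to instantiate listener named: "
              + clazz.getName() + ", reason: " + e.getCause());
        } catch (ReflectiveOperationException e) {
          throw new MetaException("Failed to instantiate listener named: "
              + clazz.getName() + ", reason: " + e);
        }
      }
    }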

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
deleted file mode 100644
index 1695bfd..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStoreInitRetry.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.sql.DriverManager;
-import java.sql.SQLException;
-
-import javax.jdo.JDOCanRetryException;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-
-public class TestObjectStoreInitRetry {
-
-  private static boolean noisy = true; // switch to false to silence the line-number debug traces for FakeDerby calls
-
-  private static int injectConnectFailure = 0;
-
-  public static void setInjectConnectFailure(int x){
-    injectConnectFailure = x;
-  }
-
-  public static int getInjectConnectFailure(){
-    return injectConnectFailure;
-  }
-
-  public static void decrementInjectConnectFailure(){
-    injectConnectFailure--;
-  }
-
-  @BeforeClass
-  public static void oneTimeSetup() throws SQLException {
-    // dummy instantiation to make sure any static/ctor code blocks of that
-    // driver are loaded and ready to go.
-    DriverManager.registerDriver(new FakeDerby());
-  }
-
-  @AfterClass
-  public static void oneTimeTearDown() throws SQLException {
-    DriverManager.deregisterDriver(new FakeDerby());
-  }
-
-  public static void misbehave() throws RuntimeException {
-    TestObjectStoreInitRetry.debugTrace();
-    if (TestObjectStoreInitRetry.getInjectConnectFailure() > 0){
-      TestObjectStoreInitRetry.decrementInjectConnectFailure();
-      RuntimeException re = new JDOCanRetryException();
-      if (noisy){
-        System.err.println("MISBEHAVE:" + TestObjectStoreInitRetry.getInjectConnectFailure());
-        re.printStackTrace(System.err);
-      }
-      throw re;
-    }
-  }
-
-  // debug instrumenter - useful for finding which functions get called, and how often
-  public static void debugTrace() {
-    if (noisy){
-      Exception e = new Exception();
-      System.err.println("." + e.getStackTrace()[1].getLineNumber() + ":" + TestObjectStoreInitRetry.getInjectConnectFailure());
-    }
-  }
-
-  protected static HiveConf hiveConf;
-
-  @Test
-  public void testObjStoreRetry() throws Exception {
-    hiveConf = new HiveConf(this.getClass());
-
-    hiveConf.setIntVar(ConfVars.HMSHANDLERATTEMPTS, 4);
-    hiveConf.setVar(ConfVars.HMSHANDLERINTERVAL, "1s");
-    hiveConf.setVar(ConfVars.METASTORE_CONNECTION_DRIVER, FakeDerby.class.getName());
-    hiveConf.setBoolVar(ConfVars.METASTORE_TRY_DIRECT_SQL, true);
-    String jdbcUrl = hiveConf.get(ConfVars.METASTORECONNECTURLKEY.varname);
-    jdbcUrl = jdbcUrl.replace("derby", "fderby");
-    hiveConf.setVar(ConfVars.METASTORECONNECTURLKEY, jdbcUrl);
-
-    ObjectStore objStore = new ObjectStore();
-
-    Exception savE = null;
-    try {
-      setInjectConnectFailure(5);
-      objStore.setConf(hiveConf);
-    } catch (Exception e) {
-      e.printStackTrace(System.err);
-      savE = e;
-    }
-
-    /**
-     * A note on retries.
-     *
-     * We configured a total of 4 attempts and injected 5 connect failures,
-     * so after setConf() exhausts its retries, 5 - 4 == 1 injected failure
-     * should remain.
-     */
-
-    assertEquals(1, getInjectConnectFailure());
-    assertNotNull(savE);
-
-    setInjectConnectFailure(0);
-    objStore.setConf(hiveConf);
-    assertEquals(0, getInjectConnectFailure());
-  }
-
-}
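
FakeDerby lives in its own file, but all the retry test needs from it is a JDBC driver that answers the rewritten jdbc:fderby: URL and calls misbehave() on the way in, so each connection attempt burns one injected failure. A skeletal version under that assumption (illustrative, not the actual FakeDerby source):

    import java.sql.Connection;
    import java.sql.SQLException;
    import java.util.Properties;

    public class FlakyDerby extends org.apache.derby.jdbc.EmbeddedDriver {
      @Override
      public boolean acceptsURL(String url) throws SQLException {
        return url != null && url.startsWith("jdbc:fderby:");
      }

      @Override
      public Connection connect(String url, Properties info) throws SQLException {
        // Throws JDOCanRetryException while injected failures remain.
        TestObjectStoreInitRetry.misbehave();
        return super.connect(url.replace("fderby", "derby"), info);
      }
    }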

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
deleted file mode 100644
index e3e175b..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestPartitionNameWhitelistValidation.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.*;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-// Validate that the metastore client call validatePartitionNameCharacters throws
-// an exception if partition fields contain Unicode characters or commas
-
-public class TestPartitionNameWhitelistValidation {
-
-  private static final String partitionValidationPattern = "[\\x20-\\x7E&&[^,]]*";
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-
-  @BeforeClass
-  public static void setupBeforeClass() throws Exception {
-    System.setProperty(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN.varname,
-        partitionValidationPattern);
-    hiveConf = new HiveConf();
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-
-  // Runs the metastore's partition name validation on the given values.
-  // Returns whether or not it succeeded.
-  private boolean runValidation(List<String> partVals) {
-    try {
-      msc.validatePartitionNameCharacters(partVals);
-    } catch (Exception e) {
-      return false;
-    }
-
-    return true;
- }
-
-  // Sample data
-  private List<String> getPartValsWithUnicode() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("klâwen");
-    partVals.add("tägelîch");
-
-    return partVals;
-  }
-
-  private List<String> getPartValsWithCommas() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("a,b");
-    partVals.add("c,d,e,f");
-
-    return partVals;
-  }
-
-  private List<String> getPartValsWithValidCharacters() {
-    List<String> partVals = new ArrayList<String>();
-    partVals.add("part1");
-    partVals.add("part2");
-
-    return partVals;
-  }
-
-  @Test
-  public void testAddPartitionWithCommas() {
-    assertFalse("Add a partition with commas in name",
-        runValidation(getPartValsWithCommas()));
-  }
-
-  @Test
-  public void testAddPartitionWithUnicode() {
-    assertFalse("Add a partition with unicode characters in name",
-        runValidation(getPartValsWithUnicode()));
-  }
-
-  @Test
-  public void testAddPartitionWithValidPartVal() {
-    assertTrue("Add a partition with unicode characters in name",
-        runValidation(getPartValsWithValidCharacters()));
-  }
-
-  @Test
-  public void testAppendPartitionWithUnicode() {
-    assertFalse("Append a partition with unicode characters in name",
-        runValidation(getPartValsWithUnicode()));
-  }
-
-  @Test
-  public void testAppendPartitionWithCommas() {
-    assertFalse("Append a partition with unicode characters in name",
-        runValidation(getPartValsWithCommas()));
-  }
-
-  @Test
-  public void testAppendPartitionWithValidCharacters() {
-    assertTrue("Append a partition with no unicode characters in name",
-        runValidation(getPartValsWithValidCharacters()));
-  }
-
-}
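
The whitelist pattern carries the whole test: "[\\x20-\\x7E&&[^,]]*" uses Java's character-class intersection to accept only printable ASCII (0x20 through 0x7E) minus the comma. A quick standalone check of that same pattern against the test's sample data:

    import java.util.regex.Pattern;

    public class WhitelistPatternDemo {
      public static void main(String[] args) {
        Pattern p = Pattern.compile("[\\x20-\\x7E&&[^,]]*");
        System.out.println(p.matcher("part1").matches());   // true:  printable ASCII only
        System.out.println(p.matcher("a,b").matches());     // false: comma is excluded
        System.out.println(p.matcher("klâwen").matches());  // false: non-ASCII character
      }
    }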

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
deleted file mode 100644
index ec84e66..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStore.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-
-
-public class TestRemoteHiveMetaStore extends TestHiveMetaStore {
-  private static boolean isServerStarted = false;
-  protected static int port;
-
-  public TestRemoteHiveMetaStore() {
-    super();
-    isThriftClient = true;
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-
-    if (isServerStarted) {
-      assertNotNull("Unable to connect to the MetaStore server", client);
-      hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-      return;
-    }
-
-    port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-    System.out.println("Starting MetaStore Server on port " + port);
-    isServerStarted = true;
-
-    // This is the default case, with setugi off for both client and server
-    client = createClient();
-  }
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    hiveConf.setBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI, false);
-    return new HiveMetaStoreClient(hiveConf);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
deleted file mode 100644
index c7c35f3..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteHiveMetaStoreIpAddress.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
-import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- *
- * TestRemoteHiveMetaStoreIpAddress.
- *
- * Test which checks that the remote Hive metastore stores the proper IP address using
- * IpAddressListener
- */
-public class TestRemoteHiveMetaStoreIpAddress extends TestCase {
-  private static boolean isServerStarted = false;
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    hiveConf = new HiveConf(this.getClass());
-
-    if (isServerStarted) {
-      assertNotNull("Unable to connect to the MetaStore server", msc);
-      return;
-    }
-
-    System.setProperty(ConfVars.METASTORE_EVENT_LISTENERS.varname,
-        IpAddressListener.class.getName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry();
-    System.out.println("Started MetaStore Server on port " + port);
-    isServerStarted = true;
-
-    // This is the default case, with setugi off for both client and server
-    createClient(port);
-  }
-
-  public void testIpAddress() throws Exception {
-    try {
-
-      Database db = new Database();
-      db.setName("testIpAddressIp");
-      msc.createDatabase(db);
-      msc.dropDatabase(db.getName());
-    } catch (Exception e) {
-      System.err.println(StringUtils.stringifyException(e));
-      System.err.println("testIpAddress() failed.");
-      throw e;
-    }
-  }
-
-  protected void createClient(int port) throws Exception {
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-    msc = new HiveMetaStoreClient(hiveConf);
-  }
-}
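
IpAddressListener itself is in a separate file; conceptually it is a MetaStoreEventListener that, on each event, compares the client address captured by the HMSHandler against the expected localhost address. A sketch of that idea, assuming the handler exposes the caller's address via HiveMetaStore.HMSHandler.getIPAddress() (treat that accessor and the class name here as assumptions; the exact API varies across Hive versions):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStore;
    import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;

    public class LocalhostIpCheckListener extends MetaStoreEventListener {
      public LocalhostIpCheckListener(Configuration config) {
        super(config);
      }

      @Override
      public void onCreateDatabase(CreateDatabaseEvent event) throws MetaException {
        try {
          String expected = InetAddress.getByName("localhost").getHostAddress();
          String actual = HiveMetaStore.HMSHandler.getIPAddress();
          if (!expected.equals(actual)) {
            throw new MetaException("Unexpected client IP: " + actual);
          }
        } catch (UnknownHostException e) {
          throw new MetaException(e.getMessage());
        }
      }
    }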

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
deleted file mode 100644
index 8658262..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestRemoteUGIHiveMetaStoreIpAddress.java
+++ /dev/null
@@ -1,28 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-
-public class TestRemoteUGIHiveMetaStoreIpAddress extends TestRemoteHiveMetaStoreIpAddress {
-  public TestRemoteUGIHiveMetaStoreIpAddress() {
-    super();
-    System.setProperty(ConfVars.METASTORE_EXECUTE_SET_UGI.varname, "true");
-  }
-
-}