Posted to commits@hive.apache.org by ga...@apache.org on 2017/11/02 16:23:08 UTC

[06/12] hive git commit: HIVE-17812 Move remaining classes that HiveMetaStore depends on. This closes #261. (Alan Gates, reviewed by Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
index 39d6b2b..662462c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java
@@ -17,9 +17,9 @@
  */
 package org.apache.hadoop.hive.ql;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.RunnableConfigurable;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.LockState;
 import org.apache.hadoop.hive.metastore.api.LockType;
@@ -27,6 +27,7 @@ import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.TxnInfo;
 import org.apache.hadoop.hive.metastore.api.TxnState;
+import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -36,7 +37,6 @@ import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.lockmgr.TestDbTxnManager2;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Ignore;
@@ -337,11 +337,11 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     runStatementOnDriver("start transaction");
     runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5");
     //make sure currently running txn is considered aborted by housekeeper
-    hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
     hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS);
-    AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService();
+    RunnableConfigurable houseKeeperService = new AcidHouseKeeperService();
+    houseKeeperService.setConf(hiveConf);
     //this will abort the txn
-    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    houseKeeperService.run();
     //this should fail because txn aborted due to timeout
     CommandProcessorResponse cpr = runStatementOnDriverNegative("delete from " + Table.ACIDTBL + " where a = 5");
     Assert.assertTrue("Actual: " + cpr.getErrorMessage(), cpr.getErrorMessage().contains("Transaction manager has aborted the transaction txnid:1"));
@@ -349,6 +349,8 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     //now test that we don't timeout locks we should not
     //heartbeater should be running in the background every 1/2 second
     hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS);
+    // Have to reset the conf when we change it so that the change takes effect
+    houseKeeperService.setConf(hiveConf);
     //hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER, true);
     runStatementOnDriver("start transaction");
     runStatementOnDriver("select count(*) from " + Table.ACIDTBL + " where a = 17");
@@ -380,14 +382,14 @@ public class TestTxnCommands extends TxnCommandsBaseForTests {
     ShowLocksResponse slr = txnHandler.showLocks(new ShowLocksRequest());
     TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
     pause(750);
-    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    houseKeeperService.run();
     pause(750);
     slr = txnHandler.showLocks(new ShowLocksRequest());
     Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
     TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
 
     pause(750);
-    TestTxnCommands2.runHouseKeeperService(houseKeeperService, hiveConf);
+    houseKeeperService.run();
     slr = txnHandler.showLocks(new ShowLocksRequest());
     Assert.assertEquals("Unexpected lock count: " + slr, 1, slr.getLocks().size());
     TestDbTxnManager2.checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", Table.ACIDTBL.name, null, slr.getLocks());
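
The hunks above replace the old start()/poll/stop() helper with a direct, synchronous call.
A minimal sketch of the new pattern (a fragment, assuming only the classes already imported
in this test file):

    // One synchronous housekeeping pass; nothing runs in the background, so there is
    // no alive-counter to poll.
    RunnableConfigurable houseKeeper = new AcidHouseKeeperService();
    houseKeeper.setConf(hiveConf); // re-apply after any conf change for it to take effect
    houseKeeper.run();             // runs once on the calling thread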

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 3737b6a..2faf098 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HouseKeeperService;
 import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionRequest;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
@@ -46,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
 import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -54,7 +54,6 @@ import org.apache.hadoop.hive.ql.io.BucketCodec;
 import org.apache.hadoop.hive.ql.io.HiveInputFormat;
 import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService;
 import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
 import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
 import org.apache.hadoop.hive.ql.txn.compactor.Initiator;
@@ -1041,7 +1040,8 @@ public class TestTxnCommands2 {
 
     hiveConf.setTimeVar(HiveConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL, 10, TimeUnit.MILLISECONDS);
     AcidCompactionHistoryService compactionHistoryService = new AcidCompactionHistoryService();
-    runHouseKeeperService(compactionHistoryService, hiveConf);//should not remove anything from history
+    compactionHistoryService.setConf(hiveConf);
+    compactionHistoryService.run();
     checkCompactionState(new CompactionsByState(numAttemptedCompactions,numFailedCompactions,0,0,0,0,numFailedCompactions + numAttemptedCompactions), countCompacts(txnHandler));
 
     txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MAJOR));
@@ -1054,7 +1054,7 @@ public class TestTxnCommands2 {
     numAttemptedCompactions++;
     checkCompactionState(new CompactionsByState(numAttemptedCompactions,numFailedCompactions + 2,0,0,0,0,numFailedCompactions + 2 + numAttemptedCompactions), countCompacts(txnHandler));
 
-    runHouseKeeperService(compactionHistoryService, hiveConf);//should remove history so that we have
+    compactionHistoryService.run();
     //COMPACTOR_HISTORY_RETENTION_FAILED failed compacts left (and no other since we only have failed ones here)
     checkCompactionState(new CompactionsByState(
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
@@ -1078,7 +1078,7 @@ public class TestTxnCommands2 {
         hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)+ 1), countCompacts(txnHandler));
 
     runCleaner(hiveConf); // transition to Success state
-    runHouseKeeperService(compactionHistoryService, hiveConf);//should not purge anything as all items within retention sizes
+    compactionHistoryService.run();
     checkCompactionState(new CompactionsByState(
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
       hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),0,0,1,0,
@@ -1198,26 +1198,6 @@ public class TestTxnCommands2 {
     t.run();
   }
 
-  public static void runHouseKeeperService(HouseKeeperService houseKeeperService, HiveConf conf) throws Exception {
-    int lastCount = houseKeeperService.getIsAliveCounter();
-    houseKeeperService.start(conf);
-    int maxIter = 10;
-    int iterCount = 0;
-    while(houseKeeperService.getIsAliveCounter() <= lastCount) {
-      if(iterCount++ >= maxIter) {
-        //prevent test hangs
-        throw new IllegalStateException("HouseKeeper didn't run after " + (iterCount - 1) + " waits");
-      }
-      try {
-        Thread.sleep(100);//make sure it has run at least once
-      }
-      catch(InterruptedException ex) {
-        //...
-      }
-    }
-    houseKeeperService.stop();
-  }
-
   /**
    * HIVE-12352 has details
    * @throws Exception

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
index e46e65b..406bdea 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java
@@ -19,12 +19,14 @@ package org.apache.hadoop.hive.ql.lockmgr;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.ThreadPool;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
 import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
 import org.apache.hadoop.hive.metastore.api.TxnState;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService;
 import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -38,7 +40,6 @@ import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.HiveOperation;
 import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -202,24 +203,9 @@ public class TestDbTxnManager {
    * aborts timed out transactions
    */
   private void runReaper() throws Exception {
-    int lastCount = houseKeeperService.getIsAliveCounter();
-    houseKeeperService.start(conf);
-    int maxIter = 10;
-    int iterCount = 0;
-    while(houseKeeperService.getIsAliveCounter() <= lastCount) {
-      if(iterCount++ >= maxIter) {
-        //prevent test hangs
-        throw new IllegalStateException("Reaper didn't run after " + iterCount + " waits");
-      }
-      try {
-        Thread.sleep(100);//make sure it has run at least once
-      }
-      catch(InterruptedException ex) {
-        //...
-      }
-    }
-    houseKeeperService.stop();
+    houseKeeperService.run();
   }
+
   @Test
   public void testExceptions() throws Exception {
     addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
@@ -491,13 +477,14 @@ public class TestDbTxnManager {
     conf.setTimeVar(HiveConf.ConfVars.HIVE_TIMEDOUT_TXN_REAPER_START, 0, TimeUnit.SECONDS);
     conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 10, TimeUnit.SECONDS);
     houseKeeperService = new AcidHouseKeeperService();
+    houseKeeperService.setConf(conf);
   }
 
   @After
   public void tearDown() throws Exception {
-    if(houseKeeperService != null) houseKeeperService.stop();
     if (txnMgr != null) txnMgr.closeTxnManager();
     TxnDbUtil.cleanDb(conf);
+    ThreadPool.shutdown();
   }
 
   private static class MockQueryPlan extends QueryPlan {

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 15045d6..3c172b9 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -19,12 +19,13 @@ package org.apache.hadoop.hive.ql.lockmgr;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
+import org.apache.hadoop.hive.metastore.RunnableConfigurable;
 import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
 import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.ql.TestTxnCommands2;
-import org.apache.hadoop.hive.ql.txn.AcidWriteSetService;
 import org.junit.After;
 import org.junit.Assert;
 import org.apache.hadoop.hive.common.FileUtils;
@@ -1036,7 +1037,8 @@ public class TestDbTxnManager2 {
       1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
 
     AcidWriteSetService houseKeeper = new AcidWriteSetService();
-    TestTxnCommands2.runHouseKeeperService(houseKeeper, conf);
+    houseKeeper.setConf(conf);
+    houseKeeper.run();
     //since T3 overlaps with Long Running (still open) GC does nothing
     Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
     checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 1"));//no rows match
@@ -1050,7 +1052,7 @@ public class TestDbTxnManager2 {
 
     locks = getLocks(txnMgr);
     Assert.assertEquals("Unexpected lock count", 0, locks.size());
-    TestTxnCommands2.runHouseKeeperService(houseKeeper, conf);
+    houseKeeper.run();
     Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
   }
   /**
@@ -1120,7 +1122,9 @@ public class TestDbTxnManager2 {
     Assert.assertEquals("Unexpected lock count", 1, locks.size());
     checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB2", null, locks);
     txnMgr.commitTxn();
-    TestTxnCommands2.runHouseKeeperService(new AcidWriteSetService(), conf);
+    RunnableConfigurable writeSetService = new AcidWriteSetService();
+    writeSetService.setConf(conf);
+    writeSetService.run();
     Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
index 277738f..85ee8c7 100644
--- a/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
+++ b/shims/common/src/main/java/org/apache/hadoop/hive/io/HdfsUtils.java
@@ -51,17 +51,6 @@ import com.google.common.collect.Iterables;
 public class HdfsUtils {
   private static final Logger LOG = LoggerFactory.getLogger("shims.HdfsUtils");
 
-  // TODO: this relies on HDFS not changing the format; we assume if we could get inode ID, this
-  //       is still going to work. Otherwise, file IDs can be turned off. Later, we should use
-  //       as public utility method in HDFS to obtain the inode-based path.
-  private static final String HDFS_ID_PATH_PREFIX = "/.reserved/.inodes/";
-
-  public static Path getFileIdPath(
-      FileSystem fileSystem, Path path, long fileId) {
-    return (fileSystem instanceof DistributedFileSystem)
-        ? new Path(HDFS_ID_PATH_PREFIX + fileId) : path;
-  }
-
   /**
    * Copy the permissions, group, and ACLs from a source {@link HadoopFileStatus} to a target {@link Path}. This method
    * will only log a warning if permissions cannot be set, no exception will be thrown.

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml
index 8df622f..a2a34a5 100644
--- a/standalone-metastore/pom.xml
+++ b/standalone-metastore/pom.xml
@@ -236,6 +236,12 @@
       <artifactId>javax.jdo</artifactId>
       <version>${datanucleus-jdo.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.skyscreamer</groupId>
+      <artifactId>jsonassert</artifactId>
+      <version>1.4.0</version>
+      <scope>test</scope>
+    </dependency>
 
     <!-- test scope dependencies -->
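
The new test-scope dependency above pulls in JSONAssert. For reference, its entry point is
org.skyscreamer.jsonassert.JSONAssert; an illustrative comparison (hypothetical, not part of
this patch) looks like:

    import org.skyscreamer.jsonassert.JSONAssert;

    // Lenient mode (strict = false) ignores field ordering and extra fields in the
    // actual document; it throws an AssertionError on mismatch.
    JSONAssert.assertEquals(
        "{\"name\":\"tbl1\",\"type\":\"MANAGED_TABLE\"}", // expected JSON
        actualJson,                                       // actual JSON under test
        false);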
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
index 8fc8311..8b0d71c 100644
--- a/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
+++ b/standalone-metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Autogenerated by Thrift Compiler (0.9.3)
  *
  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
@@ -7,17 +7,13 @@
 package org.apache.hadoop.hive.metastore.api;
 
 
-import java.util.Map;
-import java.util.HashMap;
-import org.apache.thrift.TEnum;
-
 public enum ClientCapability implements org.apache.thrift.TEnum {
   TEST_CAPABILITY(1),
   INSERT_ONLY_TABLES(2);
 
   private final int value;
 
-  private ClientCapability(int value) {
+  ClientCapability(int value) {
     this.value = value;
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
new file mode 100644
index 0000000..1512ffb
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AcidEventListener.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.HiveObjectType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+
+
+/**
+ * Handles cleanup of dropped partitions, tables, and databases in the ACID-related metastore tables.
+ */
+public class AcidEventListener extends MetaStoreEventListener {
+
+  private TxnStore txnHandler;
+  private Configuration conf;
+
+  public AcidEventListener(Configuration configuration) {
+    super(configuration);
+    conf = configuration;
+  }
+
+  @Override
+  public void onDropDatabase (DropDatabaseEvent dbEvent) throws MetaException {
+    // We could loop through all the tables to check if they are ACID first and then perform cleanup,
+    // but it's more efficient to unconditionally perform cleanup for the database, especially
+    // when there are a lot of tables
+    txnHandler = getTxnHandler();
+    txnHandler.cleanupRecords(HiveObjectType.DATABASE, dbEvent.getDatabase(), null, null);
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent)  throws MetaException {
+    if (TxnUtils.isAcidTable(tableEvent.getTable())) {
+      txnHandler = getTxnHandler();
+      txnHandler.cleanupRecords(HiveObjectType.TABLE, null, tableEvent.getTable(), null);
+    }
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent)  throws MetaException {
+    if (TxnUtils.isAcidTable(partitionEvent.getTable())) {
+      txnHandler = getTxnHandler();
+      txnHandler.cleanupRecords(HiveObjectType.PARTITION, null, partitionEvent.getTable(),
+          partitionEvent.getPartitionIterator());
+    }
+  }
+
+  private TxnStore getTxnHandler() {
+    boolean hackOn = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ||
+        MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEZ_TEST);
+    String origTxnMgr = null;
+    boolean origConcurrency = false;
+
+    // Since TxnUtils.getTxnStore calls TxnHandler.setConf -> checkQFileTestHack -> TxnDbUtil.setConfValues,
+    // which may change the values of the two entries below, we need to avoid polluting the original values
+    if (hackOn) {
+      origTxnMgr = MetastoreConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER);
+      origConcurrency = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY);
+    }
+
+    txnHandler = TxnUtils.getTxnStore(conf);
+
+    // Set them back
+    if (hackOn) {
+      MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, origTxnMgr);
+      MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, origConcurrency);
+    }
+
+    return txnHandler;
+  }
+}
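
The snapshot-and-restore logic in getTxnHandler() above guards two conf entries that
TxnUtils.getTxnStore may overwrite in test mode. A try/finally variant of the same pattern
(a sketch using only the names already imported in this file, not what the patch itself does)
would restore the values even if getTxnStore throws:

    String origTxnMgr = MetastoreConf.getVar(conf, ConfVars.HIVE_TXN_MANAGER);
    boolean origConcurrency = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY);
    try {
      txnHandler = TxnUtils.getTxnStore(conf); // may mutate the two entries above
    } finally {
      MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, origTxnMgr);
      MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, origConcurrency);
    }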

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
new file mode 100644
index 0000000..fc0b4d7
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+
+/**
+ * Interface for Alter Table and Alter Partition code
+ */
+public interface AlterHandler extends Configurable {
+
+  /**
+   * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
+   * String, Table, EnvironmentContext, IHMSHandler)}
+   *
+   * Handles alter table; the changes may be cascaded to partitions if applicable.
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   *          Hive Warehouse where table data is stored
+   * @param dbname
+   *          database of the table being altered
+   * @param name
+   *          original name of the table being altered. same as
+   *          <i>newTable.tableName</i> if alter op is not a rename.
+   * @param newTable
+   *          new table object
+   * @throws InvalidOperationException
+   *           thrown if the newTable object is invalid
+   * @throws MetaException
+   *           thrown if there is any other error
+   */
+  @Deprecated
+  void alterTable(RawStore msdb, Warehouse wh, String dbname,
+    String name, Table newTable, EnvironmentContext envContext)
+      throws InvalidOperationException, MetaException;
+
+  /**
+   * Handles alter table; the changes may be cascaded to partitions if applicable.
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   *          Hive Warehouse where table data is stored
+   * @param dbname
+   *          database of the table being altered
+   * @param name
+   *          original name of the table being altered. same as
+   *          <i>newTable.tableName</i> if alter op is not a rename.
+   * @param newTable
+   *          new table object
+   * @param handler
+   *          HMSHandle object (required to log event notification)
+   * @throws InvalidOperationException
+   *           thrown if the newTable object is invalid
+   * @throws MetaException
+   *           thrown if there is any other error
+   */
+  void alterTable(RawStore msdb, Warehouse wh, String dbname,
+      String name, Table newTable, EnvironmentContext envContext,
+      IHMSHandler handler) throws InvalidOperationException, MetaException;
+
+  /**
+   * @deprecated As of release 2.2.0.  Replaced by {@link #alterPartition(RawStore, Warehouse, String,
+   * String, List, Partition, EnvironmentContext, IHMSHandler)}
+   *
+   * handles alter partition
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   * @param dbname
+   *          database of the partition being altered
+   * @param name
+   *          table of the partition being altered
+   * @param part_vals
+   *          original values of the partition being altered
+   * @param new_part
+   *          new partition object
+   * @return the altered partition
+   * @throws InvalidOperationException
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   */
+  @Deprecated
+  Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<String> part_vals, final Partition new_part,
+    EnvironmentContext environmentContext)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+
+  /**
+   * handles alter partition
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   * @param dbname
+   *          database of the partition being altered
+   * @param name
+   *          table of the partition being altered
+   * @param part_vals
+   *          original values of the partition being altered
+   * @param new_part
+   *          new partition object
+   * @param handler
+   *          HMSHandle object (required to log event notification)
+   * @return the altered partition
+   * @throws InvalidOperationException
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   */
+  Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<String> part_vals, final Partition new_part, EnvironmentContext environmentContext,
+    IHMSHandler handler)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+
+  /**
+   * @deprecated As of release 2.2.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+   * String, List, EnvironmentContext, IHMSHandler)}
+   *
+   * handles alter partitions
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   * @param dbname
+   *          database of the partition being altered
+   * @param name
+   *          table of the partition being altered
+   * @param new_parts
+   *          new partition list
+   * @return the altered partition list
+   * @throws InvalidOperationException
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   */
+  @Deprecated
+  List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+    final String dbname, final String name, final List<Partition> new_parts,
+    EnvironmentContext environmentContext)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+
+  /**
+   * handles alter partitions
+   *
+   * @param msdb
+   *          object to get metadata
+   * @param wh
+   * @param dbname
+   *          database of the partition being altered
+   * @param name
+   *          table of the partition being altered
+   * @param new_parts
+   *          new partition list
+   * @param handler
+   *          HMSHandle object (required to log event notification)
+   * @return the altered partition list
+   * @throws InvalidOperationException
+   * @throws InvalidObjectException
+   * @throws AlreadyExistsException
+   * @throws MetaException
+   */
+  List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+    final String dbname, final String name, final List<Partition> new_parts,
+    EnvironmentContext environmentContext,IHMSHandler handler)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataManager.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataManager.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataManager.java
new file mode 100644
index 0000000..0b8b310
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileMetadataManager.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+
+import org.apache.hadoop.fs.LocatedFileStatus;
+
+import org.apache.hadoop.fs.RemoteIterator;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import com.google.common.collect.Lists;
+
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+
+public class FileMetadataManager {
+  private static final Log LOG = LogFactory.getLog(FileMetadataManager.class);
+
+  private final RawStore tlms;
+  private final ExecutorService threadPool;
+  private final Configuration conf;
+
+  private final class CacheUpdateRequest implements Callable<Void> {
+    FileMetadataExprType type;
+    String location;
+
+    public CacheUpdateRequest(FileMetadataExprType type, String location) {
+      this.type = type;
+      this.location = location;
+    }
+
+    @Override
+    public Void call() throws Exception {
+      try {
+        cacheMetadata(type, location);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+      } catch (Exception ex) {
+        // Nobody can see this exception on the threadpool; just log it.
+        LOG.error("Failed to cache file metadata in background for " + type + ", " + location, ex);
+      }
+      return null;
+    }
+  }
+
+  public FileMetadataManager(RawStore tlms, Configuration conf) {
+    this.tlms = tlms;
+    this.conf = conf;
+    int numThreads = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.FILE_METADATA_THREADS);
+    this.threadPool = Executors.newFixedThreadPool(numThreads,
+        new ThreadFactoryBuilder().setNameFormat("File-Metadata-%d").setDaemon(true).build());
+  }
+
+  public void queueCacheMetadata(String location, FileMetadataExprType type) {
+    threadPool.submit(new CacheUpdateRequest(type, location));
+  }
+
+  private void cacheMetadata(FileMetadataExprType type, String location)
+      throws MetaException, IOException, InterruptedException {
+    Path path = new Path(location);
+    FileSystem fs = path.getFileSystem(conf);
+    List<Path> files;
+    if (!fs.isDirectory(path)) {
+      files = Lists.newArrayList(path);
+    } else {
+      files = new ArrayList<>();
+      RemoteIterator<LocatedFileStatus> iter = fs.listFiles(path, true);
+      while (iter.hasNext()) {
+        // TODO: use fileId right from the list after HDFS-7878; or get dfs client and do it
+        LocatedFileStatus lfs = iter.next();
+        if (lfs.isDirectory()) continue;
+        files.add(lfs.getPath());
+      }
+    }
+    for (Path file : files) {
+      long fileId;
+      // TODO: use the other HdfsUtils here
+      if (!(fs instanceof DistributedFileSystem)) return;
+      try {
+        fileId = HdfsUtils.getFileId(fs, Path.getPathWithoutSchemeAndAuthority(file).toString());
+      } catch (UnsupportedOperationException ex) {
+        LOG.error("Cannot cache file metadata for " + location + "; "
+            + fs.getClass().getCanonicalName() + " does not support fileId");
+        return;
+      }
+      LOG.info("Caching file metadata for " + file + " (file ID " + fileId + ")");
+      file = HdfsUtils.getFileIdPath(fs, file, fileId);
+      tlms.getFileMetadataHandler(type).cacheFileMetadata(fileId, fs, file);
+    }
+  }
+}
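
Usage of the class above is fire-and-forget: callers enqueue a location and the daemon pool
caches metadata in the background. An illustrative call (the RawStore instance, path, and
expression type are placeholders):

    // queueCacheMetadata() returns immediately; failures inside the pool are logged,
    // not rethrown (see CacheUpdateRequest.call()).
    FileMetadataManager mgr = new FileMetadataManager(rawStore, conf);
    mgr.queueCacheMetadata("hdfs://nn:8020/warehouse/db.db/tbl", FileMetadataExprType.ORC_SARG);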

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
new file mode 100644
index 0000000..1ee6d97
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HMSMetricsListener.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import com.codahale.metrics.Counter;
+import com.codahale.metrics.MetricRegistry;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Reports metrics on metadata added and deleted by this Hive Metastore.
+ */
+public class HMSMetricsListener extends MetaStoreEventListener {
+
+  private static final Logger LOGGER = LoggerFactory.getLogger(HMSMetricsListener.class);
+
+  private Counter createdDatabases, deletedDatabases, createdTables, deletedTables, createdParts,
+      deletedParts;
+
+  public HMSMetricsListener(Configuration config) {
+    super(config);
+    createdDatabases = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_DATABASES);
+    deletedDatabases = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_DATABASES);
+    createdTables = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_TABLES);
+    deletedTables = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_TABLES);
+    createdParts = Metrics.getOrCreateCounter(MetricsConstants.CREATE_TOTAL_PARTITIONS);
+    deletedParts = Metrics.getOrCreateCounter(MetricsConstants.DELETE_TOTAL_PARTITIONS);
+  }
+
+  @Override
+  public void onCreateDatabase(CreateDatabaseEvent dbEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES).incrementAndGet();
+    createdDatabases.inc();
+  }
+
+  @Override
+  public void onDropDatabase(DropDatabaseEvent dbEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES).decrementAndGet();
+    deletedDatabases.inc();
+  }
+
+  @Override
+  public void onCreateTable(CreateTableEvent tableEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES).incrementAndGet();
+    createdTables.inc();
+  }
+
+  @Override
+  public void onDropTable(DropTableEvent tableEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES).decrementAndGet();
+    deletedTables.inc();
+  }
+
+  @Override
+  public void onDropPartition(DropPartitionEvent partitionEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS).decrementAndGet();
+    deletedParts.inc();
+  }
+
+  @Override
+  public void onAddPartition(AddPartitionEvent partitionEvent) throws MetaException {
+    Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS).incrementAndGet();
+    createdParts.inc();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
new file mode 100644
index 0000000..ccadac1
--- /dev/null
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -0,0 +1,897 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.utils.FileUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+/**
+ * Hive-specific implementation of alter table and alter partition handling.
+ */
+public class HiveAlterHandler implements AlterHandler {
+
+  protected Configuration hiveConf;
+  private static final Logger LOG = LoggerFactory.getLogger(HiveAlterHandler.class
+      .getName());
+
+  @Override
+  public Configuration getConf() {
+    return hiveConf;
+  }
+
+  @Override
+  @SuppressWarnings("nls")
+  public void setConf(Configuration conf) {
+    hiveConf = conf;
+  }
+
+  @Override
+  public void alterTable(RawStore msdb, Warehouse wh, String dbname,
+    String name, Table newt, EnvironmentContext environmentContext)
+      throws InvalidOperationException, MetaException {
+    alterTable(msdb, wh, dbname, name, newt, environmentContext, null);
+  }
+
+  @Override
+  public void alterTable(RawStore msdb, Warehouse wh, String dbname,
+      String name, Table newt, EnvironmentContext environmentContext,
+      IHMSHandler handler) throws InvalidOperationException, MetaException {
+    name = name.toLowerCase();
+    dbname = dbname.toLowerCase();
+
+    final boolean cascade = environmentContext != null
+        && environmentContext.isSetProperties()
+        && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+            StatsSetupConst.CASCADE));
+    if (newt == null) {
+      throw new InvalidOperationException("New table is invalid: " + newt);
+    }
+
+    String newTblName = newt.getTableName().toLowerCase();
+    String newDbName = newt.getDbName().toLowerCase();
+
+    if (!MetaStoreUtils.validateName(newTblName, hiveConf)) {
+      throw new InvalidOperationException(newTblName + " is not a valid object name");
+    }
+    String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
+    if (validate != null) {
+      throw new InvalidOperationException("Invalid column " + validate);
+    }
+
+    Path srcPath = null;
+    FileSystem srcFs;
+    Path destPath = null;
+    FileSystem destFs = null;
+
+    boolean success = false;
+    boolean dataWasMoved = false;
+    Table oldt;
+    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+    if (handler != null) {
+      transactionalListeners = handler.getTransactionalListeners();
+    }
+
+    try {
+      boolean rename = false;
+      boolean isPartitionedTable = false;
+      List<Partition> parts;
+
+      // check if table with the new name already exists
+      if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
+        if (msdb.getTable(newDbName, newTblName) != null) {
+          throw new InvalidOperationException("new table " + newDbName
+              + "." + newTblName + " already exists");
+        }
+        rename = true;
+      }
+
+      msdb.openTransaction();
+      // get old table
+      oldt = msdb.getTable(dbname, name);
+      if (oldt == null) {
+        throw new InvalidOperationException("table " + dbname + "." + name + " doesn't exist");
+      }
+
+      if (oldt.getPartitionKeysSize() != 0) {
+        isPartitionedTable = true;
+      }
+
+      // Views derive the column type from the base table definition.  So the view definition
+      // can be altered to change the column types.  The column type compatibility checks should
+      // be done only for non-views.
+      if (MetastoreConf.getBoolVar(hiveConf,
+            MetastoreConf.ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES) &&
+          !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
+        // Throws InvalidOperationException if the new column types are not
+        // compatible with the current column types.
+        checkColTypeChangeCompatible(oldt.getSd().getCols(), newt.getSd().getCols());
+      }
+
+      //check that partition keys have not changed, except for virtual views
+      //however, allow the partition comments to change
+      boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
+          newt.getPartitionKeys());
+
+      if(!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())){
+        if (!partKeysPartiallyEqual) {
+          throw new InvalidOperationException("partition keys can not be changed.");
+        }
+      }
+
+      // a rename needs to change the data location and move the data to the new location
+      // corresponding to the new name if:
+      // 1) the table is not a virtual view, and
+      // 2) the table is not an external table, and
+      // 3) the user didn't change the default location (or new location is empty), and
+      // 4) the table was not initially created with a specified location
+      if (rename
+          && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())
+          && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
+            || StringUtils.isEmpty(newt.getSd().getLocation()))
+          && !MetaStoreUtils.isExternalTable(oldt)) {
+        Database olddb = msdb.getDatabase(dbname);
+        // if a table was created in a user specified location using the DDL like
+        // create table tbl ... location ...., it should be treated like an external table
+        // in the table rename, its data location should not be changed. We can check
+        // if the table directory was created directly under its database directory to tell
+        // if it is such a table
+        srcPath = new Path(oldt.getSd().getLocation());
+        String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri())
+            .relativize(srcPath.toUri()).toString();
+        boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)
+            && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
+
+        if (!tableInSpecifiedLoc) {
+          srcFs = wh.getFs(srcPath);
+
+          // get new location
+          Database db = msdb.getDatabase(newDbName);
+          Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
+          destPath = new Path(databasePath, newTblName);
+          destFs = wh.getFs(destPath);
+
+          newt.getSd().setLocation(destPath.toString());
+
+          // check that destination does not exist otherwise we will be
+          // overwriting data
+          // check that src and dest are on the same file system
+          if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+            throw new InvalidOperationException("table new location " + destPath
+                + " is on a different file system than the old location "
+                + srcPath + ". This operation is not supported");
+          }
+
+          try {
+            if (destFs.exists(destPath)) {
+              throw new InvalidOperationException("New location for this table "
+                  + newDbName + "." + newTblName + " already exists : " + destPath);
+            }
+            // check that src exists and also checks permissions necessary, rename src to dest
+            if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath, true)) {
+              dataWasMoved = true;
+            }
+          } catch (IOException | MetaException e) {
+            LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
+            throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
+                " failed to move data due to: '" + getSimpleMessage(e)
+                + "' See hive log file for details.");
+          }
+        }
+
+        if (isPartitionedTable) {
+          String oldTblLocPath = srcPath.toUri().getPath();
+          String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
+
+          // also update the location field in each partition
+          parts = msdb.getPartitions(dbname, name, -1);
+          Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<>();
+          for (Partition part : parts) {
+            String oldPartLoc = part.getSd().getLocation();
+            if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
+              URI oldUri = new Path(oldPartLoc).toUri();
+              String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
+              Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
+              part.getSd().setLocation(newPartLocPath.toString());
+            }
+            part.setDbName(newDbName);
+            part.setTableName(newTblName);
+            ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
+                part.getValues(), part.getSd().getCols(), oldt, part);
+            if (colStats != null) {
+              columnStatsNeedUpdated.put(part, colStats);
+            }
+          }
+          msdb.alterTable(dbname, name, newt);
+          // alterPartition is only for changing the partition location in the table rename
+          if (dataWasMoved) {
+            for (Partition part : parts) {
+              msdb.alterPartition(newDbName, newTblName, part.getValues(), part);
+            }
+          }
+
+          for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
+            ColumnStatistics newPartColStats = partColStats.getValue();
+            newPartColStats.getStatsDesc().setDbName(newDbName);
+            newPartColStats.getStatsDesc().setTableName(newTblName);
+            msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
+          }
+        } else {
+          alterTableUpdateTableColumnStats(msdb, oldt, newt);
+        }
+      } else {
+        // operations other than table rename
+        if (MetaStoreUtils.requireCalStats(hiveConf, null, null, newt, environmentContext) &&
+            !isPartitionedTable) {
+          Database db = msdb.getDatabase(newDbName);
+          // Update table stats. For partitioned table, we update stats in alterPartition()
+          MetaStoreUtils.updateTableStatsFast(db, newt, wh, false, true, environmentContext);
+        }
+
+        if (cascade && isPartitionedTable) {
+          //Currently only column related changes can be cascaded in alter table
+          if(!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
+            parts = msdb.getPartitions(dbname, name, -1);
+            for (Partition part : parts) {
+              List<FieldSchema> oldCols = part.getSd().getCols();
+              part.getSd().setCols(newt.getSd().getCols());
+              ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, dbname, name,
+                  part.getValues(), oldCols, oldt, part);
+              assert(colStats == null);
+              msdb.alterPartition(dbname, name, part.getValues(), part);
+            }
+            msdb.alterTable(dbname, name, newt);
+          } else {
+            LOG.warn("Alter table does not cascade changes to its partitions.");
+          }
+        } else {
+          alterTableUpdateTableColumnStats(msdb, oldt, newt);
+        }
+      }
+
+      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+        if (oldt.getDbName().equalsIgnoreCase(newt.getDbName())) {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.ALTER_TABLE,
+                  new AlterTableEvent(oldt, newt, false, true, handler),
+                  environmentContext);
+        } else {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.DROP_TABLE,
+                  new DropTableEvent(oldt, true, false, handler),
+                  environmentContext);
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                  EventMessage.EventType.CREATE_TABLE,
+                  new CreateTableEvent(newt, true, handler),
+                  environmentContext);
+          if (isPartitionedTable) {
+            parts = msdb.getPartitions(newt.getDbName(), newt.getTableName(), -1);
+            MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                    EventMessage.EventType.ADD_PARTITION,
+                    new AddPartitionEvent(newt, parts, true, handler),
+                    environmentContext);
+          }
+        }
+      }
+      // commit the changes
+      success = msdb.commitTransaction();
+    } catch (InvalidObjectException e) {
+      LOG.debug("Failed to get object from Metastore ", e);
+      throw new InvalidOperationException(
+          "Unable to change partition or table."
+              + " Check metastore logs for detailed stack." + e.getMessage());
+    } catch (InvalidInputException e) {
+      LOG.debug("Accessing Metastore failed due to invalid input ", e);
+      throw new InvalidOperationException(
+          "Unable to change partition or table."
+              + " Check metastore logs for detailed stack." + e.getMessage());
+    } catch (NoSuchObjectException e) {
+      LOG.debug("Object not found in metastore ", e);
+      throw new InvalidOperationException(
+          "Unable to change partition or table. Database " + dbname + " does not exist"
+              + " Check metastore logs for detailed stack." + e.getMessage());
+    } finally {
+      if (!success) {
+        LOG.error("Failed to alter table " + dbname + "." + name);
+        msdb.rollbackTransaction();
+        if (dataWasMoved) {
+          try {
+            if (destFs.exists(destPath)) {
+              if (!destFs.rename(destPath, srcPath)) {
+                LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                    + " in alter table failure. Manual restore is needed.");
+              }
+            }
+          } catch (IOException e) {
+            LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                +  " in alter table failure. Manual restore is needed.");
+          }
+        }
+      }
+    }
+  }
+
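+  // Illustrative summary of the event wiring above: a rename within the same
+  // database fires a single ALTER_TABLE event to transactional listeners, while
+  // a cross-database move is surfaced as DROP_TABLE on the old name, CREATE_TABLE
+  // on the new name and, for partitioned tables, one ADD_PARTITION event carrying
+  // all partitions.
+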
+  /**
+   * Extract a concise message from an exception. RemoteException from Hadoop RPC wraps
+   * the remote stack trace into e.getMessage(), which makes logs and stack traces
+   * confusing; for a MetaException, only the first line of the message is returned.
+   * @param ex the exception to summarize
+   * @return the first line of a multi-line MetaException message, otherwise the full message
+   */
+  String getSimpleMessage(Exception ex) {
+    if (ex instanceof MetaException) {
+      String msg = ex.getMessage();
+      if (msg == null || !msg.contains("\n")) {
+        return msg;
+      }
+      return msg.substring(0, msg.indexOf('\n'));
+    }
+    return ex.getMessage();
+  }
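+
+  // A minimal usage sketch (the catch site below is hypothetical, not part of this class):
+  //   } catch (MetaException e) {
+  //     LOG.error("alter operation failed", e);                   // full trace for operators
+  //     throw new InvalidOperationException(getSimpleMessage(e)); // first line for clients
+  //   }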
+
+  @Override
+  public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<String> part_vals, final Partition new_part,
+    EnvironmentContext environmentContext)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+    return alterPartition(msdb, wh, dbname, name, part_vals, new_part, environmentContext, null);
+  }
+
+  @Override
+  public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<String> part_vals, final Partition new_part,
+    EnvironmentContext environmentContext, IHMSHandler handler)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+    boolean success = false;
+    Partition oldPart;
+    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+    if (handler != null) {
+      transactionalListeners = handler.getTransactionalListeners();
+    }
+
+    // Set DDL time to now if not specified
+    if (new_part.getParameters() == null ||
+        new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+        Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+      new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+          .currentTimeMillis() / 1000));
+    }
+
+    Table tbl = msdb.getTable(dbname, name);
+    if (tbl == null) {
+      throw new InvalidObjectException(
+          "Unable to alter partition because table or database does not exist.");
+    }
+
+    //alter partition
+    if (part_vals == null || part_vals.size() == 0) {
+      try {
+        msdb.openTransaction();
+        oldPart = msdb.getPartition(dbname, name, new_part.getValues());
+        if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
+          // if stats are same, no need to update
+          if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
+            MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
+          } else {
+            MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
+          }
+        }
+
+        // PartitionView does not have SD. We do not need to update its column stats
+        if (oldPart.getSd() != null) {
+          updateOrGetPartitionColumnStats(msdb, dbname, name, new_part.getValues(),
+              oldPart.getSd().getCols(), tbl, new_part);
+        }
+        msdb.alterPartition(dbname, name, new_part.getValues(), new_part);
+        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                EventMessage.EventType.ALTER_PARTITION,
+                                                new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                                environmentContext);
+        }
+        success = msdb.commitTransaction();
+      } catch (InvalidObjectException e) {
+        throw new InvalidOperationException("alter is not possible");
+      } catch (NoSuchObjectException e) {
+        // old partition does not exist
+        throw new InvalidOperationException("alter is not possible");
+      } finally {
+        if (!success) {
+          msdb.rollbackTransaction();
+        }
+      }
+      return oldPart;
+    }
+
+    //rename partition
+    String oldPartLoc;
+    String newPartLoc;
+    Path srcPath = null;
+    Path destPath = null;
+    FileSystem srcFs;
+    FileSystem destFs = null;
+    boolean dataWasMoved = false;
+    try {
+      msdb.openTransaction();
+      try {
+        oldPart = msdb.getPartition(dbname, name, part_vals);
+      } catch (NoSuchObjectException e) {
+        // this means there is no existing partition
+        throw new InvalidObjectException(
+            "Unable to rename partition because old partition does not exist");
+      }
+
+      Partition check_part;
+      try {
+        check_part = msdb.getPartition(dbname, name, new_part.getValues());
+      } catch(NoSuchObjectException e) {
+        // this means there is no existing partition
+        check_part = null;
+      }
+
+      if (check_part != null) {
+        throw new AlreadyExistsException("Partition already exists: " + dbname + "." + name + "." +
+            new_part.getValues());
+      }
+
+      // when renaming a partition, we should update
+      // 1) partition SD Location
+      // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS
+      // 3) rename the partition directory if it is not an external table
+      if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
+        try {
+          // if tbl location is available use it
+          // else derive the tbl location from database location
+          destPath = wh.getPartitionPath(msdb.getDatabase(dbname), tbl, new_part.getValues());
+          destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
+        } catch (NoSuchObjectException e) {
+          LOG.debug("Didn't find object in metastore ", e);
+          throw new InvalidOperationException(
+            "Unable to change partition or table. Database " + dbname + " does not exist"
+              + " Check metastore logs for detailed stack." + e.getMessage());
+        }
+
+        if (destPath != null) {
+          newPartLoc = destPath.toString();
+          oldPartLoc = oldPart.getSd().getLocation();
+          LOG.info("srcPath:" + oldPartLoc);
+          LOG.info("descPath:" + newPartLoc);
+          srcPath = new Path(oldPartLoc);
+          srcFs = wh.getFs(srcPath);
+          destFs = wh.getFs(destPath);
+          // check that src and dest are on the same file system
+          if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+            throw new InvalidOperationException("New table location " + destPath
+              + " is on a different file system than the old location "
+              + srcPath + ". This operation is not supported.");
+          }
+
+          try {
+            if (srcFs.exists(srcPath)) {
+              if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+                throw new InvalidOperationException("New location for this table "
+                  + tbl.getDbName() + "." + tbl.getTableName()
+                  + " already exists : " + destPath);
+              }
+              //if destPath's parent path doesn't exist, we should mkdir it
+              Path destParentPath = destPath.getParent();
+              if (!wh.mkdirs(destParentPath)) {
+                throw new MetaException("Unable to create path " + destParentPath);
+              }
+
+              //rename the data directory
+              wh.renameDir(srcPath, destPath, true);
+              LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
+              dataWasMoved = true;
+            }
+          } catch (IOException e) {
+            LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
+            throw new InvalidOperationException("Unable to access src or dest location for partition "
+                + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
+          } catch (MetaException me) {
+            LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
+            throw me;
+          }
+
+          new_part.getSd().setLocation(newPartLoc);
+        }
+      } else {
+        new_part.getSd().setLocation(oldPart.getSd().getLocation());
+      }
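+      // Worked example (hypothetical locations): renaming (p=1) to (p=2) on a
+      // managed table stored at hdfs://nn:8020/warehouse/db.db/tbl derives
+      // destPath hdfs://nn:8020/warehouse/db.db/tbl/p=2 from the table location
+      // and moves the old directory .../tbl/p=1 onto it, while an external
+      // table keeps its old location untouched.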
+
+      if (MetaStoreUtils.requireCalStats(hiveConf, oldPart, new_part, tbl, environmentContext)) {
+        MetaStoreUtils.updatePartitionStatsFast(new_part, wh, false, true, environmentContext);
+      }
+
+      String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
+      ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, dbname, name, oldPart.getValues(),
+          oldPart.getSd().getCols(), tbl, new_part);
+      msdb.alterPartition(dbname, name, part_vals, new_part);
+      if (cs != null) {
+        cs.getStatsDesc().setPartName(newPartName);
+        try {
+          msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
+        } catch (InvalidInputException iie) {
+          throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
+        } catch (NoSuchObjectException nsoe) {
+          // It is ok, ignore
+        }
+      }
+
+      if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+        MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                              EventMessage.EventType.ALTER_PARTITION,
+                                              new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                              environmentContext);
+      }
+
+      success = msdb.commitTransaction();
+    } finally {
+      if (!success) {
+        LOG.error("Failed to rename a partition. Rollback transaction");
+        msdb.rollbackTransaction();
+        if (dataWasMoved) {
+          LOG.error("Revert the data move in renaming a partition.");
+          try {
+            if (destFs.exists(destPath)) {
+              wh.renameDir(destPath, srcPath, false);
+            }
+          } catch (MetaException me) {
+            LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                +  " in alter partition failure. Manual restore is needed.");
+          } catch (IOException ioe) {
+            LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                +  " in alter partition failure. Manual restore is needed.");
+          }
+        }
+      }
+    }
+    return oldPart;
+  }
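+
+  // A hedged caller sketch (alterHandler, msdb, wh, envCtx and handler are assumed
+  // to be in scope; values are hypothetical). Renaming partition (ds=2017-01-01)
+  // to (ds=2017-01-02) takes the rename branch above:
+  //   Partition newPart = oldPart.deepCopy();
+  //   newPart.setValues(Lists.newArrayList("2017-01-02"));
+  //   alterHandler.alterPartition(msdb, wh, "default", "sales",
+  //       Lists.newArrayList("2017-01-01"), newPart, envCtx, handler);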
+
+  @Override
+  public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<Partition> new_parts,
+    EnvironmentContext environmentContext)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+    return alterPartitions(msdb, wh, dbname, name, new_parts, environmentContext, null);
+  }
+
+  @Override
+  public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+    final String name, final List<Partition> new_parts, EnvironmentContext environmentContext,
+    IHMSHandler handler)
+      throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+    List<Partition> oldParts = new ArrayList<>();
+    List<List<String>> partValsList = new ArrayList<>();
+    List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+    if (handler != null) {
+      transactionalListeners = handler.getTransactionalListeners();
+    }
+
+    Table tbl = msdb.getTable(dbname, name);
+    if (tbl == null) {
+      throw new InvalidObjectException(
+          "Unable to alter partitions because table or database does not exist.");
+    }
+
+    boolean success = false;
+    try {
+      msdb.openTransaction();
+      for (Partition tmpPart: new_parts) {
+        // Set DDL time to now if not specified
+        if (tmpPart.getParameters() == null ||
+            tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+            Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+          tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+              .currentTimeMillis() / 1000));
+        }
+
+        Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
+        oldParts.add(oldTmpPart);
+        partValsList.add(tmpPart.getValues());
+
+        if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl, environmentContext)) {
+          // Check if stats are same, no need to update
+          if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
+            MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
+          } else {
+            MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext);
+          }
+        }
+
+        // PartitionView does not have SD and we do not need to update its column stats
+        if (oldTmpPart.getSd() != null) {
+          updateOrGetPartitionColumnStats(msdb, dbname, name, oldTmpPart.getValues(),
+              oldTmpPart.getSd().getCols(), tbl, tmpPart);
+        }
+      }
+
+      msdb.alterPartitions(dbname, name, partValsList, new_parts);
+      Iterator<Partition> oldPartsIt = oldParts.iterator();
+      for (Partition newPart : new_parts) {
+        Partition oldPart;
+        if (oldPartsIt.hasNext()) {
+          oldPart = oldPartsIt.next();
+        } else {
+          throw new InvalidOperationException("Missing old partition corresponding to new partition " +
+              "when invoking MetaStoreEventListener for alterPartitions event.");
+        }
+
+        if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+          MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                EventMessage.EventType.ALTER_PARTITION,
+                                                new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
+        }
+      }
+
+      success = msdb.commitTransaction();
+    } catch (InvalidObjectException | NoSuchObjectException e) {
+      throw new InvalidOperationException("Alter partition operation failed: " + e);
+    } finally {
+      if (!success) {
+        msdb.rollbackTransaction();
+      }
+    }
+
+    return oldParts;
+  }
+
+  private boolean checkPartialPartKeysEqual(List<FieldSchema> oldPartKeys,
+      List<FieldSchema> newPartKeys) {
+    //return true if both are null, or false if one is null and the other isn't
+    if (newPartKeys == null || oldPartKeys == null) {
+      return oldPartKeys == newPartKeys;
+    }
+    if (oldPartKeys.size() != newPartKeys.size()) {
+      return false;
+    }
+    Iterator<FieldSchema> oldPartKeysIter = oldPartKeys.iterator();
+    Iterator<FieldSchema> newPartKeysIter = newPartKeys.iterator();
+    FieldSchema oldFs;
+    FieldSchema newFs;
+    while (oldPartKeysIter.hasNext()) {
+      oldFs = oldPartKeysIter.next();
+      newFs = newPartKeysIter.next();
+      // Alter table can change the type of partition key now.
+      // So check the column name only.
+      if (!oldFs.getName().equals(newFs.getName())) {
+        return false;
+      }
+    }
+
+    return true;
+  }
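+
+  // For example: keys (a:int, b:string) and (a:bigint, b:string) compare equal
+  // here, since only names are checked; (a:int, b:string) and (a:int, c:string)
+  // do not.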
+
+  /**
+   * Uses the scheme and authority of the object's current location and the path constructed
+   * using the object's new name to construct a path for the object's new location.
+   */
+  private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
+    URI currentUri = currentPath.toUri();
+
+    return new Path(currentUri.getScheme(), currentUri.getAuthority(),
+        defaultNewPath.toUri().getPath());
+  }
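+
+  // For example (hypothetical URIs): currentPath hdfs://nn1:8020/warehouse/old.db/t
+  // and defaultNewPath hdfs://nn2:8020/warehouse/new.db/t yield
+  // hdfs://nn1:8020/warehouse/new.db/t, i.e. the old scheme and authority joined
+  // with the new name's path.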
+
+  @VisibleForTesting
+  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
+      throws MetaException, InvalidObjectException {
+    String dbName = oldTable.getDbName().toLowerCase();
+    String tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(oldTable.getTableName());
+    String newDbName = newTable.getDbName().toLowerCase();
+    String newTableName = org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier(newTable.getTableName());
+
+    try {
+      List<FieldSchema> oldCols = oldTable.getSd().getCols();
+      List<FieldSchema> newCols = newTable.getSd().getCols();
+      List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+      ColumnStatistics colStats = null;
+      boolean updateColumnStats = true;
+
+      // Nothing to update if everything is the same
+      if (newDbName.equals(dbName) &&
+          newTableName.equals(tableName) &&
+          MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+        updateColumnStats = false;
+      }
+
+      if (updateColumnStats) {
+        List<String> oldColNames = new ArrayList<>(oldCols.size());
+        for (FieldSchema oldCol : oldCols) {
+          oldColNames.add(oldCol.getName());
+        }
+
+        // Collect column stats which need to be rewritten and remove old stats
+        colStats = msdb.getTableColumnStatistics(dbName, tableName, oldColNames);
+        if (colStats == null) {
+          updateColumnStats = false;
+        } else {
+          List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+          if (statsObjs != null) {
+            List<String> deletedCols = new ArrayList<>();
+            for (ColumnStatisticsObj statsObj : statsObjs) {
+              boolean found = false;
+              for (FieldSchema newCol : newCols) {
+                if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                    && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+                  found = true;
+                  break;
+                }
+              }
+
+              if (found) {
+                if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
+                  msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
+                  newStatsObjs.add(statsObj);
+                  deletedCols.add(statsObj.getColName());
+                }
+              } else {
+                msdb.deleteTableColumnStatistics(dbName, tableName, statsObj.getColName());
+                deletedCols.add(statsObj.getColName());
+              }
+            }
+            StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+          }
+        }
+      }
+
+      // Change to new table and append stats for the new table
+      msdb.alterTable(dbName, tableName, newTable);
+      if (updateColumnStats && !newStatsObjs.isEmpty()) {
+        ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+        statsDesc.setDbName(newDbName);
+        statsDesc.setTableName(newTableName);
+        colStats.setStatsObj(newStatsObjs);
+        msdb.updateTableColumnStatistics(colStats);
+      }
+    } catch (NoSuchObjectException nsoe) {
+      LOG.debug("Could not find db entry." + nsoe);
+    } catch (InvalidInputException e) {
+      //should not happen since the input were verified before passed in
+      throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
+    }
+  }
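+
+  // Illustrative outcomes of the logic above: a pure rename (db or table name
+  // changes, columns unchanged) deletes each stats row under the old name and
+  // rewrites it under the new one, while a dropped or retyped column simply has
+  // its stats deleted and its column-stats state cleared from the table
+  // parameters.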
+
+  private ColumnStatistics updateOrGetPartitionColumnStats(
+      RawStore msdb, String dbname, String tblname, List<String> partVals,
+      List<FieldSchema> oldCols, Table table, Partition part)
+          throws MetaException, InvalidObjectException {
+    ColumnStatistics newPartsColStats = null;
+    try {
+      List<FieldSchema> newCols = part.getSd() == null ?
+          new ArrayList<>() : part.getSd().getCols();
+      String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+      String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+      boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
+          || !oldPartName.equals(newPartName);
+
+      // no need to update column stats if this alter is neither a rename nor a change to existing columns
+      if (!rename && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+        return newPartsColStats;
+      }
+      List<String> oldColNames = new ArrayList<>(oldCols.size());
+      for (FieldSchema oldCol : oldCols) {
+        oldColNames.add(oldCol.getName());
+      }
+      List<String> oldPartNames = Lists.newArrayList(oldPartName);
+      List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(dbname, tblname,
+          oldPartNames, oldColNames);
+      assert (partsColStats.size() <= 1);
+      for (ColumnStatistics partColStats : partsColStats) { // at most one iteration
+        List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+        List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
+        List<String> deletedCols = new ArrayList<>();
+        for (ColumnStatisticsObj statsObj : statsObjs) {
+          boolean found = false;
+          for (FieldSchema newCol : newCols) {
+            if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+              found = true;
+              break;
+            }
+          }
+          if (found) {
+            if (rename) {
+              msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                  partVals, statsObj.getColName());
+              newStatsObjs.add(statsObj);
+            }
+          } else {
+            msdb.deletePartitionColumnStatistics(dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                partVals, statsObj.getColName());
+            deletedCols.add(statsObj.getColName());
+          }
+        }
+        StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
+        if (!newStatsObjs.isEmpty()) {
+          partColStats.setStatsObj(newStatsObjs);
+          newPartsColStats = partColStats;
+        }
+      }
+    } catch (NoSuchObjectException nsoe) {
+      // ignore; getPartitionColumnStatistics will not actually throw this
+    } catch (InvalidInputException iie) {
+      throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
+    }
+
+    return newPartsColStats;
+  }
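+
+  // Note: the return value is non-null only when a rename leaves matching column
+  // stats to carry over; the alterTable/alterPartition paths above then rewrite
+  // those stats under the new db/table/partition name.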
+
+  private void checkColTypeChangeCompatible(List<FieldSchema> oldCols, List<FieldSchema> newCols)
+      throws InvalidOperationException {
+    List<String> incompatibleCols = new ArrayList<>();
+    int maxCols = Math.min(oldCols.size(), newCols.size());
+    for (int i = 0; i < maxCols; i++) {
+      if (!ColumnType.areColTypesCompatible(
+          ColumnType.getTypeName(oldCols.get(i).getType()),
+          ColumnType.getTypeName(newCols.get(i).getType()))) {
+        incompatibleCols.add(newCols.get(i).getName());
+      }
+    }
+    if (!incompatibleCols.isEmpty()) {
+      throw new InvalidOperationException(
+          "The following columns have types incompatible with the existing " +
+              "columns in their respective positions :\n" +
+              org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+      );
+    }
+  }
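+
+  // For example: comparing (a:int, b:string) against (a:bigint, b:string, c:date)
+  // checks only the two overlapping positions; the added trailing column c is not
+  // examined, and whether int may become bigint is delegated to
+  // ColumnType.areColTypesCompatible.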
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/c5a9673a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 633b3c7..85bdc4d 100644
--- a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -18,11 +18,70 @@
 
 package org.apache.hadoop.hive.metastore;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
 
+import java.util.List;
+
+/**
+ * An interface wrapper for HMSHandler.  This interface contains methods that need to be
+ * called by internal classes but that are not part of the thrift interface.
+ */
+@InterfaceAudience.Private
 public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
 
   void init() throws MetaException;
+
+  /**
+   * Get the id of this handler's thread.
+   * @return thread id
+   */
+  int getThreadId();
+
+  /**
+   * Get a reference to the underlying RawStore.
+   * @return the RawStore instance.
+   * @throws MetaException if the creation of a new RawStore object is necessary but fails.
+   */
+  RawStore getMS() throws MetaException;
+
+  /**
+   * Get a reference to Hive's warehouse object (the class that does all the physical operations).
+   * @return Warehouse instance.
+   */
+  Warehouse getWh();
+
+  /**
+   * Equivalent to get_database, but does not write to audit logs or fire pre-event listeners.
+   * Meant to be used for internal hive classes that don't use the thrift interface.
+   * @param name database name
+   * @return database object
+   * @throws NoSuchObjectException If the database does not exist.
+   * @throws MetaException If another error occurs.
+   */
+  Database get_database_core(final String name) throws NoSuchObjectException, MetaException;
+
+  /**
+   * Equivalent of get_table, but does not log audits or fire pre-event listeners.
+   * Meant to be used for calls made by other Hive classes that do not use the
+   * thrift interface.
+   * @param dbname database name
+   * @param name table name
+   * @return Table object
+   * @throws NoSuchObjectException If the table does not exist.
+   * @throws MetaException  If another error occurs.
+   */
+  Table get_table_core(final String dbname, final String name) throws MetaException,
+      NoSuchObjectException;
+
+  /**
+   * Get a list of all transactional listeners.
+   * @return list of listeners.
+   */
+  List<TransactionalMetaStoreEventListener> getTransactionalListeners();
 }
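
A hedged sketch of how an internal (non-thrift) component might use the widened
interface; the logTableOwner helper and its wiring are hypothetical (Logger is
org.slf4j.Logger), and only the methods declared above are assumed:

    void logTableOwner(IHMSHandler handler, Logger log)
        throws MetaException, NoSuchObjectException {
      // Bypasses audit logging and pre-event listeners, per the javadoc above.
      Table t = handler.get_table_core("default", "sales");
      Database db = handler.get_database_core(t.getDbName());
      log.info("Table " + t.getTableName() + " owned by " + t.getOwner()
          + "; db location " + db.getLocationUri());
    }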